Dataset schema: one row per fill-in-the-middle (FIM) example. Each row is a source file split into three pieces such that prefix + middle + suffix reproduces the file exactly, and fim_type records how the split point was chosen. Fields appear in each row below in the order file_name, prefix, suffix, middle, fim_type.

    file_name   large_string   length 4 to 140
    prefix      large_string   length 0 to 39k
    suffix      large_string   length 0 to 36.1k
    middle      large_string   length 0 to 29.4k
    fim_type    large_string   4 classes: random_line_split, identifier_body, identifier_name, conditional_block
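Because each row satisfies prefix + middle + suffix == original file, rows are easy to reassemble or to format as infilling prompts. The sketch below is a minimal illustration, not the dataset's own tooling; the `<fim_*>` sentinel tokens follow the SantaCoder/StarCoder convention and are an assumption here.

```python
# Minimal sketch: reassemble a row and format it as a PSM-style FIM prompt.
# The <fim_*> sentinels are model-specific (SantaCoder/StarCoder convention,
# assumed here); substitute whatever tokens your model was trained with.

def reassemble(row: dict) -> str:
    # The original source file is exactly prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: dict) -> str:
    # Prefix-Suffix-Middle ordering: the model generates `middle` at the end.
    return f"<fim_prefix>{row['prefix']}<fim_suffix>{row['suffix']}<fim_middle>"

row = {"prefix": "def add(a, b):\n    return ", "middle": "a + b", "suffix": "\n"}
assert reassemble(row) == "def add(a, b):\n    return a + b\n"
print(to_fim_prompt(row))
```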
file_name: bot.js
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // bot.js is your main bot dialog entry point for handling activity types // Import required Bot Builder const { ActionTypes, ActivityTypes, CardFactory } = require('botbuilder'); const { LuisRecognizer } = require('botbuilder-ai'); const { DialogSet, WaterfallDialog } = require('botbuilder-dialogs'); const { OAuthHelpers, LOGIN_PROMPT } = require('./oauth-helpers'); const CONNECTION_SETTING_NAME = '<MS Graph API Connection Name>'; /** * Demonstrates the following concepts: * Displaying a Welcome Card, using Adaptive Card technology * Use LUIS to model Greetings, Help, and Cancel interactions * Use a Waterfall dialog to model multi-turn conversation flow * Use custom prompts to validate user input * Store conversation and user state * Handle conversation interruptions */ let luisResult = null; class BasicBot { /** * Constructs the three pieces necessary for this bot to operate: * 1. StatePropertyAccessor for conversation state * 2. StatePropertyAccess for user state * 3. LUIS client * 4. DialogSet to handle our GreetingDialog * * @param {ConversationState} conversationState property accessor * @param {application} LUISApplication property accessor * @param {luisPredictionOptions} PredictionOptions property accessor * @param {includeApiResults} APIResults Application property accessor */ constructor(conversationState, application, luisPredictionOptions, includeApiResults) { this.luisRecognizer = new LuisRecognizer(application,luisPredictionOptions, true); this.conversationState = conversationState; // DialogState property accessor. Used to keep persist DialogState when using DialogSet. this.dialogState = conversationState.createProperty('dialogState'); this.commandState = conversationState.createProperty('commandState');
this.helpMessage = `You can type "send <recipient_email>" to send an email, "recent" to view recent unread mail,` + ` "me" to see information about your, or "help" to view the commands` + ` again. For others LUIS displays intent with score.`; // Create a DialogSet that contains the OAuthPrompt. this.dialogs = new DialogSet(this.dialogState); // Add an OAuthPrompt with the connection name as specified on the Bot's settings blade in Azure. this.dialogs.add(OAuthHelpers.prompt(CONNECTION_SETTING_NAME)); this._graphDialogId = 'graphDialog'; // Logs in the user and calls proceeding dialogs, if login is successful. this.dialogs.add(new WaterfallDialog(this._graphDialogId, [ this.promptStep.bind(this), this.processStep.bind(this) ])); } /** * Driver code that does one of the following: * 1. Display a welcome card upon receiving ConversationUpdate activity * 2. Use LUIS to recognize intents for incoming user message * 3. Start a greeting dialog * 4. Optionally handle Cancel or Help interruptions * * @param {Context} turnContext turn context from the adapter */ async onTurn(turnContext) { const dc = await this.dialogs.createContext(turnContext); const results = await this.luisRecognizer.recognize(turnContext); switch (turnContext._activity.type) { case ActivityTypes.Message: this.luisResult = results; await this.processInput(dc); break; case ActivityTypes.Event: case ActivityTypes.Invoke: if (turnContext._activity.type === ActivityTypes.Invoke && turnContext._activity.channelId !== 'msteams') { throw new Error('The Invoke type is only valid on the MS Teams channel.'); }; await dc.continueDialog(); if (!turnContext.responded) { await dc.beginDialog(this._graphDialogId); }; break; case ActivityTypes.ConversationUpdate: await this.sendWelcomeMessage(turnContext); break; default: await turnContext.sendActivity(`[${ turnContext._activity.type }]-type activity detected.`); } await this.conversationState.saveChanges(turnContext); } async sendWelcomeMessage(turnContext) { const activity = turnContext.activity; if (activity && activity.membersAdded) { const heroCard = CardFactory.heroCard( 'Welcome to LUIS with MSGraph API Authentication BOT!', CardFactory.images(['https://botframeworksamples.blob.core.windows.net/samples/aadlogo.png']), CardFactory.actions([ { type: ActionTypes.ImBack, title: 'Log me in', value: 'login' }, { type: ActionTypes.ImBack, title: 'Me', value: 'me' }, { type: ActionTypes.ImBack, title: 'Recent', value: 'recent' }, { type: ActionTypes.ImBack, title: 'View Token', value: 'viewToken' }, { type: ActionTypes.ImBack, title: 'Help', value: 'help' }, { type: ActionTypes.ImBack, title: 'Signout', value: 'signout' } ]) ); for (const idx in activity.membersAdded) { if (activity.membersAdded[idx].id !== activity.recipient.id) { await turnContext.sendActivity({ attachments: [heroCard] }); } } } } async processInput(dc, luisResult) { //console.log(dc); switch (dc.context.activity.text.toLowerCase()) { case 'signout': case 'logout': case 'signoff': case 'logoff': // The bot adapter encapsulates the authentication processes and sends // activities to from the Bot Connector Service. const botAdapter = dc.context.adapter; await botAdapter.signOutUser(dc.context, CONNECTION_SETTING_NAME); // Let the user know they are signed out. await dc.context.sendActivity('You are now signed out.'); break; case 'help': await dc.context.sendActivity(this.helpMessage); break; default: // The user has input a command that has not been handled yet, // begin the waterfall dialog to handle the input. 
await dc.continueDialog(); if (!dc.context.responded) { await dc.beginDialog(this._graphDialogId); } } }; async promptStep(step) { const activity = step.context.activity; if (activity.type === ActivityTypes.Message && !(/\d{6}/).test(activity.text)) { await this.commandState.set(step.context, activity.text); await this.conversationState.saveChanges(step.context); } return await step.beginDialog(LOGIN_PROMPT); } async processStep(step) { //console.log(step); // We do not need to store the token in the bot. When we need the token we can // send another prompt. If the token is valid the user will not need to log back in. // The token will be available in the Result property of the task. const tokenResponse = step.result; // If the user is authenticated the bot can use the token to make API calls. if (tokenResponse !== undefined) { let parts = await this.commandState.get(step.context); if (!parts) { parts = step.context.activity.text; } const command = parts.split(' ')[0].toLowerCase(); console.log(command); if(command === 'login' || command === 'signin'){ await step.context.sendActivity(`You have already loggedin!`); } else if (command === 'me') { await OAuthHelpers.listMe(step.context, tokenResponse); } else if (command === 'send') { await OAuthHelpers.sendMail(step.context, tokenResponse, parts.split(' ')[1].toLowerCase()); } else if (command === 'recent') { await OAuthHelpers.listRecentMail(step.context, tokenResponse); } else if(command.toLowerCase() === 'viewtoken'){ await step.context.sendActivity(`Your token is: ${ tokenResponse.token }`); }else{ console.log(this.luisResult); const topIntent = this.luisResult.luisResult.topScoringIntent; if(topIntent !== 'None'){ await step.context.sendActivity(`LUIS Top Scoring Intent: ${ topIntent.intent }, Score: ${ topIntent.score }`); }else{ await step.context.sendActivity(`Please try something else!`); // If the top scoring intent was "None" tell the user no valid intents were found and provide help. // await step.context.sendActivity(`No LUIS intents were found. // \nThis sample is about identifying two user intents: // \n - 'Calendar.Add' // \n - 'Calendar.Find' // \nTry typing 'Add Event' or 'Show me tomorrow'.`); } } } else { // Ask the user to try logging in later as they are not logged in. await step.context.sendActivity(`We couldn't log you in. Please try again later.`); } return await step.endDialog(); }; }; exports.BasicBot = BasicBot;
middle: // Instructions for the user with information about commands that this bot may handle.
fim_type: random_line_split
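In random_line_split rows such as the one above, the held-out middle is a single line taken from an arbitrary point in the file (here, a comment). A plausible reconstruction of that split, assuming exactly one whole line is held out, is sketched below; the dataset's actual splitting code is not shown in this dump.

```python
import random

def random_line_split(source: str, rng: random.Random) -> dict:
    # Hold out one randomly chosen line as the middle; everything before it
    # becomes the prefix and everything after it the suffix. This is a
    # plausible reading of the label, not the dataset's actual code.
    lines = source.splitlines(keepends=True)
    i = rng.randrange(len(lines))
    return {"prefix": "".join(lines[:i]),
            "middle": lines[i],
            "suffix": "".join(lines[i + 1:]),
            "fim_type": "random_line_split"}

src = "a = 1\nb = 2\nc = a + b\n"
row = random_line_split(src, random.Random(0))
assert row["prefix"] + row["middle"] + row["suffix"] == src
```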
file_name: bot.js
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // bot.js is your main bot dialog entry point for handling activity types // Import required Bot Builder const { ActionTypes, ActivityTypes, CardFactory } = require('botbuilder'); const { LuisRecognizer } = require('botbuilder-ai'); const { DialogSet, WaterfallDialog } = require('botbuilder-dialogs'); const { OAuthHelpers, LOGIN_PROMPT } = require('./oauth-helpers'); const CONNECTION_SETTING_NAME = '<MS Graph API Connection Name>'; /** * Demonstrates the following concepts: * Displaying a Welcome Card, using Adaptive Card technology * Use LUIS to model Greetings, Help, and Cancel interactions * Use a Waterfall dialog to model multi-turn conversation flow * Use custom prompts to validate user input * Store conversation and user state * Handle conversation interruptions */ let luisResult = null; class BasicBot { /** * Constructs the three pieces necessary for this bot to operate: * 1. StatePropertyAccessor for conversation state * 2. StatePropertyAccess for user state * 3. LUIS client * 4. DialogSet to handle our GreetingDialog * * @param {ConversationState} conversationState property accessor * @param {application} LUISApplication property accessor * @param {luisPredictionOptions} PredictionOptions property accessor * @param {includeApiResults} APIResults Application property accessor */ constructor(conversationState, application, luisPredictionOptions, includeApiResults) { this.luisRecognizer = new LuisRecognizer(application,luisPredictionOptions, true); this.conversationState = conversationState; // DialogState property accessor. Used to keep persist DialogState when using DialogSet. this.dialogState = conversationState.createProperty('dialogState'); this.commandState = conversationState.createProperty('commandState'); // Instructions for the user with information about commands that this bot may handle. this.helpMessage = `You can type "send <recipient_email>" to send an email, "recent" to view recent unread mail,` + ` "me" to see information about your, or "help" to view the commands` + ` again. For others LUIS displays intent with score.`; // Create a DialogSet that contains the OAuthPrompt. this.dialogs = new DialogSet(this.dialogState); // Add an OAuthPrompt with the connection name as specified on the Bot's settings blade in Azure. this.dialogs.add(OAuthHelpers.prompt(CONNECTION_SETTING_NAME)); this._graphDialogId = 'graphDialog'; // Logs in the user and calls proceeding dialogs, if login is successful. this.dialogs.add(new WaterfallDialog(this._graphDialogId, [ this.promptStep.bind(this), this.processStep.bind(this) ])); } /** * Driver code that does one of the following: * 1. Display a welcome card upon receiving ConversationUpdate activity * 2. Use LUIS to recognize intents for incoming user message * 3. Start a greeting dialog * 4. 
Optionally handle Cancel or Help interruptions * * @param {Context} turnContext turn context from the adapter */ async onTurn(turnContext) { const dc = await this.dialogs.createContext(turnContext); const results = await this.luisRecognizer.recognize(turnContext); switch (turnContext._activity.type) { case ActivityTypes.Message: this.luisResult = results; await this.processInput(dc); break; case ActivityTypes.Event: case ActivityTypes.Invoke: if (turnContext._activity.type === ActivityTypes.Invoke && turnContext._activity.channelId !== 'msteams') { throw new Error('The Invoke type is only valid on the MS Teams channel.'); }; await dc.continueDialog(); if (!turnContext.responded) { await dc.beginDialog(this._graphDialogId); }; break; case ActivityTypes.ConversationUpdate: await this.sendWelcomeMessage(turnContext); break; default: await turnContext.sendActivity(`[${ turnContext._activity.type }]-type activity detected.`); } await this.conversationState.saveChanges(turnContext); } async sendWelcomeMessage(turnContext) { const activity = turnContext.activity; if (activity && activity.membersAdded) { const heroCard = CardFactory.heroCard( 'Welcome to LUIS with MSGraph API Authentication BOT!', CardFactory.images(['https://botframeworksamples.blob.core.windows.net/samples/aadlogo.png']), CardFactory.actions([ { type: ActionTypes.ImBack, title: 'Log me in', value: 'login' }, { type: ActionTypes.ImBack, title: 'Me', value: 'me' }, { type: ActionTypes.ImBack, title: 'Recent', value: 'recent' }, { type: ActionTypes.ImBack, title: 'View Token', value: 'viewToken' }, { type: ActionTypes.ImBack, title: 'Help', value: 'help' }, { type: ActionTypes.ImBack, title: 'Signout', value: 'signout' } ]) ); for (const idx in activity.membersAdded) { if (activity.membersAdded[idx].id !== activity.recipient.id) { await turnContext.sendActivity({ attachments: [heroCard] }); } } } } async processInput(dc, luisResult)
; async promptStep(step) { const activity = step.context.activity; if (activity.type === ActivityTypes.Message && !(/\d{6}/).test(activity.text)) { await this.commandState.set(step.context, activity.text); await this.conversationState.saveChanges(step.context); } return await step.beginDialog(LOGIN_PROMPT); } async processStep(step) { //console.log(step); // We do not need to store the token in the bot. When we need the token we can // send another prompt. If the token is valid the user will not need to log back in. // The token will be available in the Result property of the task. const tokenResponse = step.result; // If the user is authenticated the bot can use the token to make API calls. if (tokenResponse !== undefined) { let parts = await this.commandState.get(step.context); if (!parts) { parts = step.context.activity.text; } const command = parts.split(' ')[0].toLowerCase(); console.log(command); if(command === 'login' || command === 'signin'){ await step.context.sendActivity(`You have already loggedin!`); } else if (command === 'me') { await OAuthHelpers.listMe(step.context, tokenResponse); } else if (command === 'send') { await OAuthHelpers.sendMail(step.context, tokenResponse, parts.split(' ')[1].toLowerCase()); } else if (command === 'recent') { await OAuthHelpers.listRecentMail(step.context, tokenResponse); } else if(command.toLowerCase() === 'viewtoken'){ await step.context.sendActivity(`Your token is: ${ tokenResponse.token }`); }else{ console.log(this.luisResult); const topIntent = this.luisResult.luisResult.topScoringIntent; if(topIntent !== 'None'){ await step.context.sendActivity(`LUIS Top Scoring Intent: ${ topIntent.intent }, Score: ${ topIntent.score }`); }else{ await step.context.sendActivity(`Please try something else!`); // If the top scoring intent was "None" tell the user no valid intents were found and provide help. // await step.context.sendActivity(`No LUIS intents were found. // \nThis sample is about identifying two user intents: // \n - 'Calendar.Add' // \n - 'Calendar.Find' // \nTry typing 'Add Event' or 'Show me tomorrow'.`); } } } else { // Ask the user to try logging in later as they are not logged in. await step.context.sendActivity(`We couldn't log you in. Please try again later.`); } return await step.endDialog(); }; }; exports.BasicBot = BasicBot;
middle: { //console.log(dc); switch (dc.context.activity.text.toLowerCase()) { case 'signout': case 'logout': case 'signoff': case 'logoff': // The bot adapter encapsulates the authentication processes and sends // activities to from the Bot Connector Service. const botAdapter = dc.context.adapter; await botAdapter.signOutUser(dc.context, CONNECTION_SETTING_NAME); // Let the user know they are signed out. await dc.context.sendActivity('You are now signed out.'); break; case 'help': await dc.context.sendActivity(this.helpMessage); break; default: // The user has input a command that has not been handled yet, // begin the waterfall dialog to handle the input. await dc.continueDialog(); if (!dc.context.responded) { await dc.beginDialog(this._graphDialogId); } } }
fim_type: identifier_body
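identifier_body rows hold out a whole function body: above, everything between the signature of processInput and the following method. That row masks a JavaScript method body; for Python sources the same kind of split can be sketched with the standard ast module (an assumed construction for illustration, not the dataset's tooling).

```python
import ast

def _offset(lines: list, lineno: int, col: int) -> int:
    # Convert a 1-based (line, column) position into a character offset.
    return sum(len(line) for line in lines[:lineno - 1]) + col

def identifier_body_split(source: str) -> dict:
    # Mask the body of the first function definition, keeping the
    # `def ...:` header in the prefix. Illustrative only (requires
    # Python 3.8+, which added end_lineno/end_col_offset on AST nodes).
    fn = next(n for n in ast.walk(ast.parse(source))
              if isinstance(n, ast.FunctionDef))
    lines = source.splitlines(keepends=True)
    start = _offset(lines, fn.body[0].lineno, fn.body[0].col_offset)
    end = _offset(lines, fn.body[-1].end_lineno, fn.body[-1].end_col_offset)
    return {"prefix": source[:start], "middle": source[start:end],
            "suffix": source[end:], "fim_type": "identifier_body"}

assert identifier_body_split("def square(x):\n    return x * x\n")["middle"] == "return x * x"
```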
file_name: parser_manager.py
from pkg_resources import require require("numpy>=1.11.1") require("scipy>=0.19.1") import argparse import logging import re import cv2 from os.path import split, exists, isdir, isfile, join, abspath, getmtime, dirname, expanduser from os import listdir, makedirs, chmod, close import sys from CrystalMatch.dls_focusstack.focus.focus_stack_lap_pyramid import FocusStack from CrystalMatch.dls_imagematch import logconfig from CrystalMatch.dls_imagematch.service import readable_config_dir from CrystalMatch.dls_imagematch.version import VersionHandler from CrystalMatch.dls_imagematch.service.readable_config_dir import ReadableConfigDir from CrystalMatch.dls_util.shape import Point from CrystalMatch.dls_util.imaging import Image class ParserManager: LOG_DIR_PERMISSION = 0o777 LOG_DIR_NAME = 'logs' LOG_FILE_NAME = 'log' FOCUSED_IMAGE_NAME = 'processed.tif' DEFAULT_SCRIPT_PATH = '.CrystalMatch' def __init__(self): self.parser = None self.images_to_stack = None self._script_path = None def build_parser(self): """Return an argument parser for the Crystal Matching service. :return: Argument parser. """ parser = argparse.ArgumentParser( description="Run Crystal Matching algorithm attempting to translate co-ordinates " "on an input image to the coordinate-space of an output image while " "accounting for possible movement of crystals in the sample.") if sys.version_info[0] < 3: parser.add_argument('Formulatrix_image', metavar="Formulatrix_image_path", type=file, help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') else: parser.add_argument('Formulatrix_image', metavar="Formulatrix_image_path", type=argparse.FileType('r'), help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') parser.add_argument('beamline_stack_path', metavar="beamline_stack_path", help="A path pointing at a directory which stores images to be stacked or a path to a stacked image.") parser.add_argument('selected_points', metavar="x,y", nargs='*', help="Comma-separated co-ordinates of selected points to be translated from the marked image " "to the target image.") parser.add_argument('-o','--output', metavar="focused_image_path", help="Specify directory for the stacked image. " "A file called 'processed.tif' will be created in the directory." "'processed.tif' will be created in log directory if this is not set.") parser.add_argument('--config', metavar="path", action=ReadableConfigDir, default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME), help="Sets the configuration directory.") parser.add_argument('--scale', metavar="scale", help="The scale between the Formulatrix and beamline image given as the resolution of each " "image separated by a colon. 
Note this is relative (1:2 is the same as 2:4) and a value " "must be specified for each image using the format " "'[Formulatrix_image_resolution]:[beamline_image_resolution]'.") parser.add_argument('-j', '--job', metavar="job_id", help="Specify a job_id - this will be reported in the output to help identify this run.") parser.add_argument('--to_json', action='store_true', help="Output a JSON object.") parser.add_argument('--version', action='version', version=VersionHandler.version_string()) parser.add_argument('--log', metavar="path", help="Write log files to the directory specified by path.") self.parser = parser def get_args(self): return self.parser.parse_args() def get_config_dir(self): config_directory = self.get_args().config if config_directory is None: config_directory = abspath(join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME)) return abspath(config_directory) def get_scale_override(self): scale = self.get_args().scale log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) if scale is not None: try: scales = scale.split(":") assert (len(scales) == 2) return float(scales[0]), float(scales[1]) except AssertionError: log.error(AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale))) raise AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale)) except ValueError: log.error("Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) raise ValueError( "Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) return None def parse_selected_points_from_args(self): """Parse the selected points list provided by the command line for validity and returns a list of Point objects. :param args: Command line arguments provided by argument parser - must contain 'selected_points' :return: List of Selected Points. """ log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) selected_points = [] if self.get_args().selected_points: point_expected_format = re.compile("[0-9]+,[0-9]+") sel_points = self.get_args().selected_points for point_string in self.get_args().selected_points: point_string = point_string.strip('()') match_results = point_expected_format.match(point_string) # Check the regex matches the entire string # DEV NOTE: can use re.full_match in Python v3 if match_results is not None and match_results.span()[1] == len(point_string): x, y = map(int, point_string.strip('()').split(',')) selected_points.append(Point(x, y)) else: log.warning("Selected point with invalid format will be ignored - '" + point_string + "'") return selected_points def get_focused_image(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: files = self._sort_files_according_to_names(focusing_path) # Run focusstack stacker = FocusStack(files, self.get_args().config) focused_image = stacker.composite() self.images_to_stack = stacker.get_fft_images_to_stack() else: focused_image = Image(cv2.imread(focusing_path)) return focused_image def get_fft_images_to_stack(self):
def get_formulatrix_image_path(self): path = self.get_args().Formulatrix_image.name self._check_is_file(path) return path def get_to_json(self): return self.get_args().to_json def get_job_id(self): return self.get_args().job # returns an error if the focused image is not saved # may want to change this for saving done later def get_focused_image_path(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: focusing_path = self.get_out_file_path() self._check_is_file(focusing_path) return abspath(focusing_path) def save_focused_image(self, image): image.save(self.get_out_file_path()) def get_out_file_path(self): """ Get the path to the output file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. """ dir_path = self._get_output_dir() self._check_make_dirs(dir_path) return join(dir_path, self.FOCUSED_IMAGE_NAME) def get_log_file_path(self): """ Get the path to the log file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. """ dir_path = self._get_log_file_dir() self._check_make_dirs(dir_path) return join(dir_path, self.LOG_FILE_NAME) def _get_output_dir(self): out = self.get_args().output if out is None: # default - log file directory default_output_path = self._get_log_file_dir() return default_output_path return abspath(self.get_args().output) def _get_log_file_dir(self): l = self.get_args().log if l is None: # DEV NOTE: join and abspath used over split due to uncertainty over config path ending in a slash default_log_path =abspath(join(self.get_script_path(), self.LOG_DIR_NAME)) return default_log_path return abspath(self.get_args().log) def _check_make_dirs(self, directory): if not exists(directory) or not isdir(directory): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) try: makedirs(directory) chmod(directory, self.LOG_DIR_PERMISSION) log.info("Directory created: " + directory) except OSError: log.error("Could not create find/create directory, path may be invalid: " + directory) exit(1) @staticmethod def _check_is_file(path): if not isfile(path): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) log.error("Could not find the file, file may not been saved: " + path) exit(1) @staticmethod def _sort_files_according_to_names(focusing_path): files = [] file_names = listdir(focusing_path) if sys.version_info[0] < 3: file_names.sort(key=lambda f: int(filter(str.isdigit, f))) else: file_names.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) for file_name in file_names: name = join(focusing_path, file_name) with open(name, 'rt') as f: files.append(f) return files def set_script_path(self, path): new_path = self._if_egg_use_home(path) self._script_path = new_path def get_script_path(self): return self._script_path def _if_egg_use_home(self, path): new_path = abspath(join(path, '..')) if ".egg" in new_path: home = expanduser("~") new_path = abspath(join(home, self.DEFAULT_SCRIPT_PATH)) if not exists(new_path): makedirs(new_path) return new_path
middle: return self.images_to_stack
fim_type: identifier_body
file_name: parser_manager.py
from pkg_resources import require require("numpy>=1.11.1") require("scipy>=0.19.1") import argparse import logging import re import cv2 from os.path import split, exists, isdir, isfile, join, abspath, getmtime, dirname, expanduser from os import listdir, makedirs, chmod, close import sys from CrystalMatch.dls_focusstack.focus.focus_stack_lap_pyramid import FocusStack from CrystalMatch.dls_imagematch import logconfig from CrystalMatch.dls_imagematch.service import readable_config_dir from CrystalMatch.dls_imagematch.version import VersionHandler from CrystalMatch.dls_imagematch.service.readable_config_dir import ReadableConfigDir from CrystalMatch.dls_util.shape import Point from CrystalMatch.dls_util.imaging import Image class ParserManager: LOG_DIR_PERMISSION = 0o777 LOG_DIR_NAME = 'logs' LOG_FILE_NAME = 'log' FOCUSED_IMAGE_NAME = 'processed.tif' DEFAULT_SCRIPT_PATH = '.CrystalMatch' def __init__(self): self.parser = None self.images_to_stack = None self._script_path = None def build_parser(self): """Return an argument parser for the Crystal Matching service. :return: Argument parser. """ parser = argparse.ArgumentParser( description="Run Crystal Matching algorithm attempting to translate co-ordinates " "on an input image to the coordinate-space of an output image while " "accounting for possible movement of crystals in the sample.") if sys.version_info[0] < 3: parser.add_argument('Formulatrix_image', metavar="Formulatrix_image_path", type=file, help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') else: parser.add_argument('Formulatrix_image', metavar="Formulatrix_image_path", type=argparse.FileType('r'), help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') parser.add_argument('beamline_stack_path', metavar="beamline_stack_path", help="A path pointing at a directory which stores images to be stacked or a path to a stacked image.") parser.add_argument('selected_points', metavar="x,y", nargs='*', help="Comma-separated co-ordinates of selected points to be translated from the marked image " "to the target image.") parser.add_argument('-o','--output', metavar="focused_image_path", help="Specify directory for the stacked image. " "A file called 'processed.tif' will be created in the directory." "'processed.tif' will be created in log directory if this is not set.") parser.add_argument('--config', metavar="path", action=ReadableConfigDir, default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME), help="Sets the configuration directory.") parser.add_argument('--scale', metavar="scale", help="The scale between the Formulatrix and beamline image given as the resolution of each " "image separated by a colon. 
Note this is relative (1:2 is the same as 2:4) and a value " "must be specified for each image using the format " "'[Formulatrix_image_resolution]:[beamline_image_resolution]'.") parser.add_argument('-j', '--job', metavar="job_id", help="Specify a job_id - this will be reported in the output to help identify this run.") parser.add_argument('--to_json', action='store_true', help="Output a JSON object.") parser.add_argument('--version', action='version', version=VersionHandler.version_string()) parser.add_argument('--log', metavar="path", help="Write log files to the directory specified by path.") self.parser = parser def get_args(self): return self.parser.parse_args() def get_config_dir(self): config_directory = self.get_args().config if config_directory is None: config_directory = abspath(join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME)) return abspath(config_directory) def get_scale_override(self): scale = self.get_args().scale log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) if scale is not None: try: scales = scale.split(":") assert (len(scales) == 2) return float(scales[0]), float(scales[1]) except AssertionError: log.error(AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale))) raise AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale)) except ValueError: log.error("Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) raise ValueError( "Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) return None def parse_selected_points_from_args(self): """Parse the selected points list provided by the command line for validity and returns a list of Point objects. :param args: Command line arguments provided by argument parser - must contain 'selected_points' :return: List of Selected Points. """ log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) selected_points = [] if self.get_args().selected_points: point_expected_format = re.compile("[0-9]+,[0-9]+") sel_points = self.get_args().selected_points for point_string in self.get_args().selected_points: point_string = point_string.strip('()') match_results = point_expected_format.match(point_string) # Check the regex matches the entire string # DEV NOTE: can use re.full_match in Python v3 if match_results is not None and match_results.span()[1] == len(point_string): x, y = map(int, point_string.strip('()').split(',')) selected_points.append(Point(x, y)) else: log.warning("Selected point with invalid format will be ignored - '" + point_string + "'") return selected_points def get_focused_image(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: files = self._sort_files_according_to_names(focusing_path) # Run focusstack stacker = FocusStack(files, self.get_args().config) focused_image = stacker.composite() self.images_to_stack = stacker.get_fft_images_to_stack() else: focused_image = Image(cv2.imread(focusing_path)) return focused_image def get_fft_images_to_stack(self): return self.images_to_stack def get_formulatrix_image_path(self): path = self.get_args().Formulatrix_image.name self._check_is_file(path) return path def
(self): return self.get_args().to_json def get_job_id(self): return self.get_args().job # returns an error if the focused image is not saved # may want to change this for saving done later def get_focused_image_path(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: focusing_path = self.get_out_file_path() self._check_is_file(focusing_path) return abspath(focusing_path) def save_focused_image(self, image): image.save(self.get_out_file_path()) def get_out_file_path(self): """ Get the path to the output file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. """ dir_path = self._get_output_dir() self._check_make_dirs(dir_path) return join(dir_path, self.FOCUSED_IMAGE_NAME) def get_log_file_path(self): """ Get the path to the log file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. """ dir_path = self._get_log_file_dir() self._check_make_dirs(dir_path) return join(dir_path, self.LOG_FILE_NAME) def _get_output_dir(self): out = self.get_args().output if out is None: # default - log file directory default_output_path = self._get_log_file_dir() return default_output_path return abspath(self.get_args().output) def _get_log_file_dir(self): l = self.get_args().log if l is None: # DEV NOTE: join and abspath used over split due to uncertainty over config path ending in a slash default_log_path =abspath(join(self.get_script_path(), self.LOG_DIR_NAME)) return default_log_path return abspath(self.get_args().log) def _check_make_dirs(self, directory): if not exists(directory) or not isdir(directory): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) try: makedirs(directory) chmod(directory, self.LOG_DIR_PERMISSION) log.info("Directory created: " + directory) except OSError: log.error("Could not create find/create directory, path may be invalid: " + directory) exit(1) @staticmethod def _check_is_file(path): if not isfile(path): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) log.error("Could not find the file, file may not been saved: " + path) exit(1) @staticmethod def _sort_files_according_to_names(focusing_path): files = [] file_names = listdir(focusing_path) if sys.version_info[0] < 3: file_names.sort(key=lambda f: int(filter(str.isdigit, f))) else: file_names.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) for file_name in file_names: name = join(focusing_path, file_name) with open(name, 'rt') as f: files.append(f) return files def set_script_path(self, path): new_path = self._if_egg_use_home(path) self._script_path = new_path def get_script_path(self): return self._script_path def _if_egg_use_home(self, path): new_path = abspath(join(path, '..')) if ".egg" in new_path: home = expanduser("~") new_path = abspath(join(home, self.DEFAULT_SCRIPT_PATH)) if not exists(new_path): makedirs(new_path) return new_path
middle: get_to_json
fim_type: identifier_name
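In identifier_name rows only a declared name is held out: the prefix above ends right after `def `, and the model must recover get_to_json from the body and its call sites. A minimal sketch of that split follows, assuming the first def in the file is the target; the real pipeline may choose targets differently.

```python
import re
from typing import Optional

def identifier_name_split(source: str) -> Optional[dict]:
    # Hold out just the name of the first declared function, so the
    # prefix ends right after "def ". Assumed construction.
    m = re.search(r"\bdef (\w+)", source)
    if m is None:
        return None
    return {"prefix": source[:m.start(1)], "middle": m.group(1),
            "suffix": source[m.end(1):], "fim_type": "identifier_name"}

row = identifier_name_split("def get_to_json(self):\n    return self.get_args().to_json\n")
assert row["middle"] == "get_to_json"
```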
file_name: parser_manager.py
from pkg_resources import require require("numpy>=1.11.1") require("scipy>=0.19.1") import argparse import logging import re import cv2 from os.path import split, exists, isdir, isfile, join, abspath, getmtime, dirname, expanduser from os import listdir, makedirs, chmod, close import sys from CrystalMatch.dls_focusstack.focus.focus_stack_lap_pyramid import FocusStack from CrystalMatch.dls_imagematch import logconfig from CrystalMatch.dls_imagematch.service import readable_config_dir from CrystalMatch.dls_imagematch.version import VersionHandler from CrystalMatch.dls_imagematch.service.readable_config_dir import ReadableConfigDir from CrystalMatch.dls_util.shape import Point from CrystalMatch.dls_util.imaging import Image class ParserManager: LOG_DIR_PERMISSION = 0o777 LOG_DIR_NAME = 'logs' LOG_FILE_NAME = 'log' FOCUSED_IMAGE_NAME = 'processed.tif' DEFAULT_SCRIPT_PATH = '.CrystalMatch' def __init__(self): self.parser = None self.images_to_stack = None self._script_path = None def build_parser(self): """Return an argument parser for the Crystal Matching service. :return: Argument parser. """ parser = argparse.ArgumentParser( description="Run Crystal Matching algorithm attempting to translate co-ordinates " "on an input image to the coordinate-space of an output image while " "accounting for possible movement of crystals in the sample.") if sys.version_info[0] < 3: parser.add_argument('Formulatrix_image',
type=file, help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') else: parser.add_argument('Formulatrix_image', metavar="Formulatrix_image_path", type=argparse.FileType('r'), help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') parser.add_argument('beamline_stack_path', metavar="beamline_stack_path", help="A path pointing at a directory which stores images to be stacked or a path to a stacked image.") parser.add_argument('selected_points', metavar="x,y", nargs='*', help="Comma-separated co-ordinates of selected points to be translated from the marked image " "to the target image.") parser.add_argument('-o','--output', metavar="focused_image_path", help="Specify directory for the stacked image. " "A file called 'processed.tif' will be created in the directory." "'processed.tif' will be created in log directory if this is not set.") parser.add_argument('--config', metavar="path", action=ReadableConfigDir, default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME), help="Sets the configuration directory.") parser.add_argument('--scale', metavar="scale", help="The scale between the Formulatrix and beamline image given as the resolution of each " "image separated by a colon. Note this is relative (1:2 is the same as 2:4) and a value " "must be specified for each image using the format " "'[Formulatrix_image_resolution]:[beamline_image_resolution]'.") parser.add_argument('-j', '--job', metavar="job_id", help="Specify a job_id - this will be reported in the output to help identify this run.") parser.add_argument('--to_json', action='store_true', help="Output a JSON object.") parser.add_argument('--version', action='version', version=VersionHandler.version_string()) parser.add_argument('--log', metavar="path", help="Write log files to the directory specified by path.") self.parser = parser def get_args(self): return self.parser.parse_args() def get_config_dir(self): config_directory = self.get_args().config if config_directory is None: config_directory = abspath(join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME)) return abspath(config_directory) def get_scale_override(self): scale = self.get_args().scale log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) if scale is not None: try: scales = scale.split(":") assert (len(scales) == 2) return float(scales[0]), float(scales[1]) except AssertionError: log.error(AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale))) raise AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale)) except ValueError: log.error("Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) raise ValueError( "Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) return None def parse_selected_points_from_args(self): """Parse the selected points list provided by the command line for validity and returns a list of Point objects. :param args: Command line arguments provided by argument parser - must contain 'selected_points' :return: List of Selected Points. 
""" log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) selected_points = [] if self.get_args().selected_points: point_expected_format = re.compile("[0-9]+,[0-9]+") sel_points = self.get_args().selected_points for point_string in self.get_args().selected_points: point_string = point_string.strip('()') match_results = point_expected_format.match(point_string) # Check the regex matches the entire string # DEV NOTE: can use re.full_match in Python v3 if match_results is not None and match_results.span()[1] == len(point_string): x, y = map(int, point_string.strip('()').split(',')) selected_points.append(Point(x, y)) else: log.warning("Selected point with invalid format will be ignored - '" + point_string + "'") return selected_points def get_focused_image(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: files = self._sort_files_according_to_names(focusing_path) # Run focusstack stacker = FocusStack(files, self.get_args().config) focused_image = stacker.composite() self.images_to_stack = stacker.get_fft_images_to_stack() else: focused_image = Image(cv2.imread(focusing_path)) return focused_image def get_fft_images_to_stack(self): return self.images_to_stack def get_formulatrix_image_path(self): path = self.get_args().Formulatrix_image.name self._check_is_file(path) return path def get_to_json(self): return self.get_args().to_json def get_job_id(self): return self.get_args().job # returns an error if the focused image is not saved # may want to change this for saving done later def get_focused_image_path(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: focusing_path = self.get_out_file_path() self._check_is_file(focusing_path) return abspath(focusing_path) def save_focused_image(self, image): image.save(self.get_out_file_path()) def get_out_file_path(self): """ Get the path to the output file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. """ dir_path = self._get_output_dir() self._check_make_dirs(dir_path) return join(dir_path, self.FOCUSED_IMAGE_NAME) def get_log_file_path(self): """ Get the path to the log file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. 
""" dir_path = self._get_log_file_dir() self._check_make_dirs(dir_path) return join(dir_path, self.LOG_FILE_NAME) def _get_output_dir(self): out = self.get_args().output if out is None: # default - log file directory default_output_path = self._get_log_file_dir() return default_output_path return abspath(self.get_args().output) def _get_log_file_dir(self): l = self.get_args().log if l is None: # DEV NOTE: join and abspath used over split due to uncertainty over config path ending in a slash default_log_path =abspath(join(self.get_script_path(), self.LOG_DIR_NAME)) return default_log_path return abspath(self.get_args().log) def _check_make_dirs(self, directory): if not exists(directory) or not isdir(directory): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) try: makedirs(directory) chmod(directory, self.LOG_DIR_PERMISSION) log.info("Directory created: " + directory) except OSError: log.error("Could not create find/create directory, path may be invalid: " + directory) exit(1) @staticmethod def _check_is_file(path): if not isfile(path): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) log.error("Could not find the file, file may not been saved: " + path) exit(1) @staticmethod def _sort_files_according_to_names(focusing_path): files = [] file_names = listdir(focusing_path) if sys.version_info[0] < 3: file_names.sort(key=lambda f: int(filter(str.isdigit, f))) else: file_names.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) for file_name in file_names: name = join(focusing_path, file_name) with open(name, 'rt') as f: files.append(f) return files def set_script_path(self, path): new_path = self._if_egg_use_home(path) self._script_path = new_path def get_script_path(self): return self._script_path def _if_egg_use_home(self, path): new_path = abspath(join(path, '..')) if ".egg" in new_path: home = expanduser("~") new_path = abspath(join(home, self.DEFAULT_SCRIPT_PATH)) if not exists(new_path): makedirs(new_path) return new_path
middle: metavar="Formulatrix_image_path",
fim_type: random_line_split
file_name: parser_manager.py
from pkg_resources import require require("numpy>=1.11.1") require("scipy>=0.19.1") import argparse import logging import re import cv2 from os.path import split, exists, isdir, isfile, join, abspath, getmtime, dirname, expanduser from os import listdir, makedirs, chmod, close import sys from CrystalMatch.dls_focusstack.focus.focus_stack_lap_pyramid import FocusStack from CrystalMatch.dls_imagematch import logconfig from CrystalMatch.dls_imagematch.service import readable_config_dir from CrystalMatch.dls_imagematch.version import VersionHandler from CrystalMatch.dls_imagematch.service.readable_config_dir import ReadableConfigDir from CrystalMatch.dls_util.shape import Point from CrystalMatch.dls_util.imaging import Image class ParserManager: LOG_DIR_PERMISSION = 0o777 LOG_DIR_NAME = 'logs' LOG_FILE_NAME = 'log' FOCUSED_IMAGE_NAME = 'processed.tif' DEFAULT_SCRIPT_PATH = '.CrystalMatch' def __init__(self): self.parser = None self.images_to_stack = None self._script_path = None def build_parser(self): """Return an argument parser for the Crystal Matching service. :return: Argument parser. """ parser = argparse.ArgumentParser( description="Run Crystal Matching algorithm attempting to translate co-ordinates " "on an input image to the coordinate-space of an output image while " "accounting for possible movement of crystals in the sample.") if sys.version_info[0] < 3: parser.add_argument('Formulatrix_image', metavar="Formulatrix_image_path", type=file, help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') else: parser.add_argument('Formulatrix_image', metavar="Formulatrix_image_path", type=argparse.FileType('r'), help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on ' 'this image.') parser.add_argument('beamline_stack_path', metavar="beamline_stack_path", help="A path pointing at a directory which stores images to be stacked or a path to a stacked image.") parser.add_argument('selected_points', metavar="x,y", nargs='*', help="Comma-separated co-ordinates of selected points to be translated from the marked image " "to the target image.") parser.add_argument('-o','--output', metavar="focused_image_path", help="Specify directory for the stacked image. " "A file called 'processed.tif' will be created in the directory." "'processed.tif' will be created in log directory if this is not set.") parser.add_argument('--config', metavar="path", action=ReadableConfigDir, default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME), help="Sets the configuration directory.") parser.add_argument('--scale', metavar="scale", help="The scale between the Formulatrix and beamline image given as the resolution of each " "image separated by a colon. 
Note this is relative (1:2 is the same as 2:4) and a value " "must be specified for each image using the format " "'[Formulatrix_image_resolution]:[beamline_image_resolution]'.") parser.add_argument('-j', '--job', metavar="job_id", help="Specify a job_id - this will be reported in the output to help identify this run.") parser.add_argument('--to_json', action='store_true', help="Output a JSON object.") parser.add_argument('--version', action='version', version=VersionHandler.version_string()) parser.add_argument('--log', metavar="path", help="Write log files to the directory specified by path.") self.parser = parser def get_args(self): return self.parser.parse_args() def get_config_dir(self): config_directory = self.get_args().config if config_directory is None: config_directory = abspath(join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME)) return abspath(config_directory) def get_scale_override(self): scale = self.get_args().scale log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) if scale is not None: try: scales = scale.split(":") assert (len(scales) == 2) return float(scales[0]), float(scales[1]) except AssertionError: log.error(AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale))) raise AssertionError("Scale flag requires two values separated by a colon':'. Value given: " + str(scale)) except ValueError: log.error("Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) raise ValueError( "Scale must be given as a pair of float values separated by a colon (':'). Value given: " + str(scale)) return None def parse_selected_points_from_args(self): """Parse the selected points list provided by the command line for validity and returns a list of Point objects. :param args: Command line arguments provided by argument parser - must contain 'selected_points' :return: List of Selected Points. """ log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) selected_points = [] if self.get_args().selected_points: point_expected_format = re.compile("[0-9]+,[0-9]+") sel_points = self.get_args().selected_points for point_string in self.get_args().selected_points:
return selected_points def get_focused_image(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: files = self._sort_files_according_to_names(focusing_path) # Run focusstack stacker = FocusStack(files, self.get_args().config) focused_image = stacker.composite() self.images_to_stack = stacker.get_fft_images_to_stack() else: focused_image = Image(cv2.imread(focusing_path)) return focused_image def get_fft_images_to_stack(self): return self.images_to_stack def get_formulatrix_image_path(self): path = self.get_args().Formulatrix_image.name self._check_is_file(path) return path def get_to_json(self): return self.get_args().to_json def get_job_id(self): return self.get_args().job # returns an error if the focused image is not saved # may want to change this for saving done later def get_focused_image_path(self): focusing_path = abspath(self.get_args().beamline_stack_path) if "." not in focusing_path: focusing_path = self.get_out_file_path() self._check_is_file(focusing_path) return abspath(focusing_path) def save_focused_image(self, image): image.save(self.get_out_file_path()) def get_out_file_path(self): """ Get the path to the output file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. """ dir_path = self._get_output_dir() self._check_make_dirs(dir_path) return join(dir_path, self.FOCUSED_IMAGE_NAME) def get_log_file_path(self): """ Get the path to the log file based on the contents of the config file and the location of the configuration dir. :return: A string representing the file path of the log file. """ dir_path = self._get_log_file_dir() self._check_make_dirs(dir_path) return join(dir_path, self.LOG_FILE_NAME) def _get_output_dir(self): out = self.get_args().output if out is None: # default - log file directory default_output_path = self._get_log_file_dir() return default_output_path return abspath(self.get_args().output) def _get_log_file_dir(self): l = self.get_args().log if l is None: # DEV NOTE: join and abspath used over split due to uncertainty over config path ending in a slash default_log_path =abspath(join(self.get_script_path(), self.LOG_DIR_NAME)) return default_log_path return abspath(self.get_args().log) def _check_make_dirs(self, directory): if not exists(directory) or not isdir(directory): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) try: makedirs(directory) chmod(directory, self.LOG_DIR_PERMISSION) log.info("Directory created: " + directory) except OSError: log.error("Could not create find/create directory, path may be invalid: " + directory) exit(1) @staticmethod def _check_is_file(path): if not isfile(path): log = logging.getLogger(".".join([__name__])) log.addFilter(logconfig.ThreadContextFilter()) log.error("Could not find the file, file may not been saved: " + path) exit(1) @staticmethod def _sort_files_according_to_names(focusing_path): files = [] file_names = listdir(focusing_path) if sys.version_info[0] < 3: file_names.sort(key=lambda f: int(filter(str.isdigit, f))) else: file_names.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) for file_name in file_names: name = join(focusing_path, file_name) with open(name, 'rt') as f: files.append(f) return files def set_script_path(self, path): new_path = self._if_egg_use_home(path) self._script_path = new_path def get_script_path(self): return self._script_path def _if_egg_use_home(self, path): new_path = abspath(join(path, 
'..')) if ".egg" in new_path: home = expanduser("~") new_path = abspath(join(home, self.DEFAULT_SCRIPT_PATH)) if not exists(new_path): makedirs(new_path) return new_path
middle: point_string = point_string.strip('()') match_results = point_expected_format.match(point_string) # Check the regex matches the entire string # DEV NOTE: can use re.full_match in Python v3 if match_results is not None and match_results.span()[1] == len(point_string): x, y = map(int, point_string.strip('()').split(',')) selected_points.append(Point(x, y)) else: log.warning("Selected point with invalid format will be ignored - '" + point_string + "'")
fim_type: conditional_block
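conditional_block rows hold out a region built around branching logic: the middle above is the loop body whose if/else validates each point string. One way to sketch such a split for Python is to mask the span of the first if statement, a narrower notion than the row above and an assumption about what the label means.

```python
import ast

def conditional_block_split(source: str) -> dict:
    # Hold out the source span of the first `if` statement, including any
    # else branch. Assumed construction for illustration only.
    node = next(n for n in ast.walk(ast.parse(source)) if isinstance(n, ast.If))
    lines = source.splitlines(keepends=True)

    def to_off(lineno: int, col: int) -> int:
        return sum(len(line) for line in lines[:lineno - 1]) + col

    start = to_off(node.lineno, node.col_offset)
    end = to_off(node.end_lineno, node.end_col_offset)
    return {"prefix": source[:start], "middle": source[start:end],
            "suffix": source[end:], "fim_type": "conditional_block"}

src = "x = 3\nif x > 2:\n    y = 1\nelse:\n    y = 0\nprint(y)\n"
row = conditional_block_split(src)
assert row["prefix"] + row["middle"] + row["suffix"] == src
assert row["middle"].startswith("if x > 2:")
```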
file_name: certificate_fetcher.go
package rancher import ( "fmt" "io/ioutil" "os" "path" "path/filepath" "reflect" "sync" "time" "github.com/Sirupsen/logrus" "github.com/rancher/go-rancher-metadata/metadata" "github.com/rancher/go-rancher/v2" "github.com/rancher/lb-controller/config" ) const ( DefaultCertName = "fullchain.pem" DefaultKeyName = "privkey.pem" ) type CertificateFetcher interface { FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error LookForCertUpdates(do func(string)) } type RCertificateFetcher struct { Client *client.RancherClient CertDir string DefaultCertDir string CertsCache map[string]*config.Certificate //cert name (sub dir name) -> cert DefaultCert *config.Certificate tempCertsMap map[string]*config.Certificate //cert name (sub dir name) -> cert updateCheckInterval int forceUpdateInterval float64 mu *sync.RWMutex CertName string KeyName string initPollDone bool initPollMu *sync.RWMutex } func (fetcher *RCertificateFetcher) checkIfInitPollDone() bool
func (fetcher *RCertificateFetcher) setInitPollDone() { fetcher.initPollMu.Lock() fetcher.initPollDone = true fetcher.initPollMu.Unlock() } func (fetcher *RCertificateFetcher) FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) { // fetch certificates either from mounted certDir or from cattle certs := []*config.Certificate{} var defaultCert *config.Certificate if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { for { if fetcher.checkIfInitPollDone() { if isDefaultCert { if fetcher.DefaultCertDir != "" { logrus.Debugf("Found defaultCertDir label %v", fetcher.DefaultCertDir) defaultCert = fetcher.ReadDefaultCertificate(fetcher.DefaultCertDir) if defaultCert != nil { certs = append(certs, defaultCert) } } } else { //read all the certificates from the mounted certDir if fetcher.CertDir != "" { logrus.Debugf("Found certDir label %v", fetcher.CertDir) certsFromDir := fetcher.ReadAllCertificatesFromDir(fetcher.CertDir) certs = append(certs, certsFromDir...) } } break } else { logrus.Debugf("Waiting for InitPollDone()") time.Sleep(time.Duration(2) * time.Second) } } } else { if !isDefaultCert { for _, certID := range lbMeta.CertificateIDs { cert, err := fetcher.FetchRancherCertificate(certID) if err != nil { return nil, err } certs = append(certs, cert) } } else { if lbMeta.DefaultCertificateID != "" { var err error defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID) if err != nil { return nil, err } if defaultCert != nil { certs = append(certs, defaultCert) } } } } return certs, nil } func (fetcher *RCertificateFetcher) FetchRancherCertificate(certID string) (*config.Certificate, error) { if certID == "" { return nil, nil } opts := client.NewListOpts() opts.Filters["id"] = certID opts.Filters["removed_null"] = "1" cert, err := fetcher.Client.Certificate.ById(certID) if err != nil { return nil, fmt.Errorf("Coudln't get certificate by id [%s]. Error: %#v", certID, err) } if cert == nil { return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID) } certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain) return &config.Certificate{ Name: cert.Name, Key: cert.Key, Cert: certWithChain, }, nil } func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error { opts := client.NewListOpts() opts.Filters["uuid"] = lbSvc.UUID opts.Filters["removed_null"] = "1" lbs, err := fetcher.Client.LoadBalancerService.List(opts) if err != nil { return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err) } if len(lbs.Data) == 0 { logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID) return nil } lb := lbs.Data[0] toUpdate := make(map[string]interface{}) toUpdate["publicEndpoints"] = eps logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps) if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil { return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. 
Error: %#v", lbSvc.Name, lbSvc.StackName, err) } return nil } func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate { certs := []*config.Certificate{} fetcher.mu.RLock() for _, value := range fetcher.CertsCache { certs = append(certs, value) } fetcher.mu.RUnlock() return certs } func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate { var currentDefCert *config.Certificate fetcher.mu.RLock() currentDefCert = fetcher.DefaultCert fetcher.mu.RUnlock() return currentDefCert } func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) { if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { lastUpdated := time.Now() for { logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir) forceUpdate := false certsUpdatedFlag := false logrus.Debugf("lastUpdated %v", lastUpdated) if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval { logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval) forceUpdate = true } //read the certs from the dir into tempMap if fetcher.CertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate) if err != nil { logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir) } else { //compare with existing cache if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) { if !forceUpdate { logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir) } else { logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir) } //there is some change, refresh certs fetcher.mu.Lock() fetcher.CertsCache = make(map[string]*config.Certificate) for path, newCert := range fetcher.tempCertsMap { fetcher.CertsCache[path] = newCert logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache : %v", newCert.Name) } certsUpdatedFlag = true fetcher.mu.Unlock() } } } //read the cert from the defaultCertDir into tempMap if fetcher.DefaultCertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate) if err != nil { logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir) } else { var tempDefCert *config.Certificate for _, cert := range fetcher.tempCertsMap { tempDefCert = cert } //compare with existing default cert if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) { fetcher.mu.Lock() fetcher.DefaultCert = tempDefCert certsUpdatedFlag = true fetcher.mu.Unlock() } } } if certsUpdatedFlag { //scheduleApplyConfig doOnUpdate("") lastUpdated = time.Now() } if !fetcher.checkIfInitPollDone() { fetcher.setInitPollDone() } logrus.Debug("Done --- LookForCertUpdates poll") time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second) } } } func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error { if f != nil && f.IsDir() { if err != nil { return fmt.Errorf("Error while walking dir [%v]. 
Error: %v", path, err) } logrus.Debugf("Walking dir %v", path) isCertFound := false isKeyFound := false cert := config.Certificate{} cert.Name = f.Name() files, err := ioutil.ReadDir(path) if err != nil { return err } for _, file := range files { if !file.IsDir() { contentBytes, err := fetcher.evaluatueLinkAndReadFile(path, file.Name()) if err != nil { logrus.Errorf("Error while reading file [%v]. Error: %v", file.Name(), err) } else { if file.Name() == fetcher.CertName { isCertFound = true cert.Cert = string(*contentBytes) } else if file.Name() == fetcher.KeyName { isKeyFound = true cert.Key = string(*contentBytes) } } } } if isCertFound && isKeyFound { fetcher.tempCertsMap[path] = &cert } else if isCertFound || isKeyFound { logrus.Warnf("Skipping incomplete cert found under dir [%v], [isCertFound %v] [isKeyFound %v]", path, isCertFound, isKeyFound) } } return nil } func (fetcher *RCertificateFetcher) evaluatueLinkAndReadFile(relativePath string, fileName string) (*[]byte, error) { filePath := path.Join(relativePath, fileName) absFilePath, err := filepath.Abs(filePath) if err != nil { return nil, fmt.Errorf("Error forming path to file %s, error: %v", filePath, err) } fInfo, err := os.Lstat(absFilePath) if os.IsNotExist(err) { return nil, fmt.Errorf("File %s does not exist", absFilePath) } targetPath := absFilePath if fInfo.Mode()&os.ModeSymlink != 0 { //read symlink targetPath, err := filepath.EvalSymlinks(absFilePath) if err != nil { return nil, fmt.Errorf("File %s pointed by symlink %s does not exist, error: %v", targetPath, absFilePath, err) } } //read target file return fetcher.readFile(targetPath) } func (fetcher *RCertificateFetcher) readFile(filePath string) (*[]byte, error) { contentBytes, err := ioutil.ReadFile(filePath) if err != nil { return nil, fmt.Errorf("Error reading file %s, error: %v", filePath, err) } return &contentBytes, nil }
{ isDone := false fetcher.initPollMu.RLock() isDone = fetcher.initPollDone fetcher.initPollMu.RUnlock() return isDone }
identifier_body
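The masked body above is a read-lock-guarded flag check, paired with setInitPollDone's write-locked update in the suffix. A minimal sketch of the same guarded-flag pattern, re-expressed in Python for illustration (the class and names here are hypothetical, not part of the Rancher code):

import threading

class InitFlag:
    """Hypothetical re-expression of the fetcher's initPollDone flag."""
    def __init__(self):
        self._done = False
        self._mu = threading.Lock()  # plain lock; Python's stdlib has no RWMutex

    def check_if_init_poll_done(self):
        # mirrors checkIfInitPollDone: read the flag under the lock
        with self._mu:
            return self._done

    def set_init_poll_done(self):
        # mirrors setInitPollDone: flip the flag under the same lock
        with self._mu:
            self._done = True

flag = InitFlag()
assert not flag.check_if_init_poll_done()
flag.set_init_poll_done()
assert flag.check_if_init_poll_done()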
certificate_fetcher.go
package rancher import ( "fmt" "io/ioutil" "os" "path" "path/filepath" "reflect" "sync" "time" "github.com/Sirupsen/logrus" "github.com/rancher/go-rancher-metadata/metadata" "github.com/rancher/go-rancher/v2" "github.com/rancher/lb-controller/config" ) const ( DefaultCertName = "fullchain.pem" DefaultKeyName = "privkey.pem" ) type CertificateFetcher interface { FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error LookForCertUpdates(do func(string)) } type RCertificateFetcher struct { Client *client.RancherClient CertDir string DefaultCertDir string CertsCache map[string]*config.Certificate //cert name (sub dir name) -> cert DefaultCert *config.Certificate tempCertsMap map[string]*config.Certificate //cert name (sub dir name) -> cert updateCheckInterval int forceUpdateInterval float64 mu *sync.RWMutex CertName string KeyName string initPollDone bool initPollMu *sync.RWMutex } func (fetcher *RCertificateFetcher) checkIfInitPollDone() bool { isDone := false fetcher.initPollMu.RLock() isDone = fetcher.initPollDone fetcher.initPollMu.RUnlock() return isDone } func (fetcher *RCertificateFetcher) setInitPollDone() { fetcher.initPollMu.Lock() fetcher.initPollDone = true fetcher.initPollMu.Unlock() } func (fetcher *RCertificateFetcher) FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) { // fetch certificates either from mounted certDir or from cattle certs := []*config.Certificate{} var defaultCert *config.Certificate if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { for { if fetcher.checkIfInitPollDone() { if isDefaultCert { if fetcher.DefaultCertDir != "" { logrus.Debugf("Found defaultCertDir label %v", fetcher.DefaultCertDir) defaultCert = fetcher.ReadDefaultCertificate(fetcher.DefaultCertDir) if defaultCert != nil { certs = append(certs, defaultCert) } } } else { //read all the certificates from the mounted certDir if fetcher.CertDir != "" { logrus.Debugf("Found certDir label %v", fetcher.CertDir) certsFromDir := fetcher.ReadAllCertificatesFromDir(fetcher.CertDir) certs = append(certs, certsFromDir...) } } break } else { logrus.Debugf("Waiting for InitPollDone()") time.Sleep(time.Duration(2) * time.Second) } } } else { if !isDefaultCert { for _, certID := range lbMeta.CertificateIDs { cert, err := fetcher.FetchRancherCertificate(certID) if err != nil { return nil, err } certs = append(certs, cert) } } else { if lbMeta.DefaultCertificateID != "" { var err error defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID) if err != nil { return nil, err } if defaultCert != nil { certs = append(certs, defaultCert) } } } } return certs, nil } func (fetcher *RCertificateFetcher) FetchRancherCertificate(certID string) (*config.Certificate, error) { if certID == "" { return nil, nil } opts := client.NewListOpts() opts.Filters["id"] = certID opts.Filters["removed_null"] = "1" cert, err := fetcher.Client.Certificate.ById(certID) if err != nil { return nil, fmt.Errorf("Coudln't get certificate by id [%s]. 
Error: %#v", certID, err) } if cert == nil { return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID) } certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain) return &config.Certificate{ Name: cert.Name, Key: cert.Key, Cert: certWithChain, }, nil } func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error { opts := client.NewListOpts() opts.Filters["uuid"] = lbSvc.UUID opts.Filters["removed_null"] = "1" lbs, err := fetcher.Client.LoadBalancerService.List(opts) if err != nil { return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err) } if len(lbs.Data) == 0 { logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID) return nil } lb := lbs.Data[0] toUpdate := make(map[string]interface{}) toUpdate["publicEndpoints"] = eps logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps) if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil { return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. Error: %#v", lbSvc.Name, lbSvc.StackName, err) } return nil } func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate { certs := []*config.Certificate{} fetcher.mu.RLock() for _, value := range fetcher.CertsCache { certs = append(certs, value) } fetcher.mu.RUnlock() return certs } func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate { var currentDefCert *config.Certificate fetcher.mu.RLock() currentDefCert = fetcher.DefaultCert fetcher.mu.RUnlock() return currentDefCert } func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) { if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { lastUpdated := time.Now() for { logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir) forceUpdate := false certsUpdatedFlag := false logrus.Debugf("lastUpdated %v", lastUpdated) if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval { logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval) forceUpdate = true } //read the certs from the dir into tempMap if fetcher.CertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate) if err != nil { logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir)
} else { logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir) } //there is some change, refresh certs fetcher.mu.Lock() fetcher.CertsCache = make(map[string]*config.Certificate) for path, newCert := range fetcher.tempCertsMap { fetcher.CertsCache[path] = newCert logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache : %v", newCert.Name) } certsUpdatedFlag = true fetcher.mu.Unlock() } } } //read the cert from the defaultCertDir into tempMap if fetcher.DefaultCertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate) if err != nil { logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir) } else { var tempDefCert *config.Certificate for _, cert := range fetcher.tempCertsMap { tempDefCert = cert } //compare with existing default cert if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) { fetcher.mu.Lock() fetcher.DefaultCert = tempDefCert certsUpdatedFlag = true fetcher.mu.Unlock() } } } if certsUpdatedFlag { //scheduleApplyConfig doOnUpdate("") lastUpdated = time.Now() } if !fetcher.checkIfInitPollDone() { fetcher.setInitPollDone() } logrus.Debug("Done --- LookForCertUpdates poll") time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second) } } } func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error { if f != nil && f.IsDir() { if err != nil { return fmt.Errorf("Error while walking dir [%v]. Error: %v", path, err) } logrus.Debugf("Walking dir %v", path) isCertFound := false isKeyFound := false cert := config.Certificate{} cert.Name = f.Name() files, err := ioutil.ReadDir(path) if err != nil { return err } for _, file := range files { if !file.IsDir() { contentBytes, err := fetcher.evaluatueLinkAndReadFile(path, file.Name()) if err != nil { logrus.Errorf("Error while reading file [%v]. Error: %v", file.Name(), err) } else { if file.Name() == fetcher.CertName { isCertFound = true cert.Cert = string(*contentBytes) } else if file.Name() == fetcher.KeyName { isKeyFound = true cert.Key = string(*contentBytes) } } } } if isCertFound && isKeyFound { fetcher.tempCertsMap[path] = &cert } else if isCertFound || isKeyFound { logrus.Warnf("Skipping incomplete cert found under dir [%v], [isCertFound %v] [isKeyFound %v]", path, isCertFound, isKeyFound) } } return nil } func (fetcher *RCertificateFetcher) evaluatueLinkAndReadFile(relativePath string, fileName string) (*[]byte, error) { filePath := path.Join(relativePath, fileName) absFilePath, err := filepath.Abs(filePath) if err != nil { return nil, fmt.Errorf("Error forming path to file %s, error: %v", filePath, err) } fInfo, err := os.Lstat(absFilePath) if os.IsNotExist(err) { return nil, fmt.Errorf("File %s does not exist", absFilePath) } targetPath := absFilePath if fInfo.Mode()&os.ModeSymlink != 0 { //read symlink targetPath, err := filepath.EvalSymlinks(absFilePath) if err != nil { return nil, fmt.Errorf("File %s pointed by symlink %s does not exist, error: %v", targetPath, absFilePath, err) } } //read target file return fetcher.readFile(targetPath) } func (fetcher *RCertificateFetcher) readFile(filePath string) (*[]byte, error) { contentBytes, err := ioutil.ReadFile(filePath) if err != nil { return nil, fmt.Errorf("Error reading file %s, error: %v", filePath, err) } return &contentBytes, nil }
} else { //compare with existing cache if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) { if !forceUpdate { logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir)
random_line_split
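The split above lands inside the cache-refresh branch of LookForCertUpdates: a freshly walked tempCertsMap is deep-compared against CertsCache and swapped in under the write lock when it differs or a force update is due. A minimal sketch of that compare-and-refresh step, re-expressed in Python (names hypothetical; dict equality stands in for reflect.DeepEqual):

import threading

def refresh_cache(cache, temp, lock, force_update=False):
    # deep-compare (dict equality) plays the role of reflect.DeepEqual
    if not force_update and cache == temp:
        return False                      # nothing changed, keep the cache
    with lock:                            # mirrors fetcher.mu.Lock()
        cache.clear()
        cache.update(temp)                # reload every cert into the cache
    return True                           # caller flips certsUpdatedFlag

lock = threading.Lock()
cache = {"web.example.com": "old-cert"}
print(refresh_cache(cache, {"web.example.com": "new-cert"}, lock))  # True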
certificate_fetcher.go
package rancher import ( "fmt" "io/ioutil" "os" "path" "path/filepath" "reflect" "sync" "time" "github.com/Sirupsen/logrus" "github.com/rancher/go-rancher-metadata/metadata" "github.com/rancher/go-rancher/v2" "github.com/rancher/lb-controller/config" ) const ( DefaultCertName = "fullchain.pem" DefaultKeyName = "privkey.pem" ) type CertificateFetcher interface { FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error LookForCertUpdates(do func(string)) } type RCertificateFetcher struct { Client *client.RancherClient CertDir string DefaultCertDir string CertsCache map[string]*config.Certificate //cert name (sub dir name) -> cert DefaultCert *config.Certificate tempCertsMap map[string]*config.Certificate //cert name (sub dir name) -> cert updateCheckInterval int forceUpdateInterval float64 mu *sync.RWMutex CertName string KeyName string initPollDone bool initPollMu *sync.RWMutex } func (fetcher *RCertificateFetcher) checkIfInitPollDone() bool { isDone := false fetcher.initPollMu.RLock() isDone = fetcher.initPollDone fetcher.initPollMu.RUnlock() return isDone } func (fetcher *RCertificateFetcher) setInitPollDone() { fetcher.initPollMu.Lock() fetcher.initPollDone = true fetcher.initPollMu.Unlock() } func (fetcher *RCertificateFetcher) FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) { // fetch certificates either from mounted certDir or from cattle certs := []*config.Certificate{} var defaultCert *config.Certificate if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { for { if fetcher.checkIfInitPollDone() { if isDefaultCert { if fetcher.DefaultCertDir != "" { logrus.Debugf("Found defaultCertDir label %v", fetcher.DefaultCertDir) defaultCert = fetcher.ReadDefaultCertificate(fetcher.DefaultCertDir) if defaultCert != nil { certs = append(certs, defaultCert) } } } else { //read all the certificates from the mounted certDir if fetcher.CertDir != "" { logrus.Debugf("Found certDir label %v", fetcher.CertDir) certsFromDir := fetcher.ReadAllCertificatesFromDir(fetcher.CertDir) certs = append(certs, certsFromDir...) } } break } else { logrus.Debugf("Waiting for InitPollDone()") time.Sleep(time.Duration(2) * time.Second) } } } else { if !isDefaultCert { for _, certID := range lbMeta.CertificateIDs { cert, err := fetcher.FetchRancherCertificate(certID) if err != nil { return nil, err } certs = append(certs, cert) } } else { if lbMeta.DefaultCertificateID != "" { var err error defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID) if err != nil { return nil, err } if defaultCert != nil { certs = append(certs, defaultCert) } } } } return certs, nil } func (fetcher *RCertificateFetcher)
(certID string) (*config.Certificate, error) { if certID == "" { return nil, nil } opts := client.NewListOpts() opts.Filters["id"] = certID opts.Filters["removed_null"] = "1" cert, err := fetcher.Client.Certificate.ById(certID) if err != nil { return nil, fmt.Errorf("Coudln't get certificate by id [%s]. Error: %#v", certID, err) } if cert == nil { return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID) } certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain) return &config.Certificate{ Name: cert.Name, Key: cert.Key, Cert: certWithChain, }, nil } func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error { opts := client.NewListOpts() opts.Filters["uuid"] = lbSvc.UUID opts.Filters["removed_null"] = "1" lbs, err := fetcher.Client.LoadBalancerService.List(opts) if err != nil { return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err) } if len(lbs.Data) == 0 { logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID) return nil } lb := lbs.Data[0] toUpdate := make(map[string]interface{}) toUpdate["publicEndpoints"] = eps logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps) if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil { return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. Error: %#v", lbSvc.Name, lbSvc.StackName, err) } return nil } func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate { certs := []*config.Certificate{} fetcher.mu.RLock() for _, value := range fetcher.CertsCache { certs = append(certs, value) } fetcher.mu.RUnlock() return certs } func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate { var currentDefCert *config.Certificate fetcher.mu.RLock() currentDefCert = fetcher.DefaultCert fetcher.mu.RUnlock() return currentDefCert } func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) { if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { lastUpdated := time.Now() for { logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir) forceUpdate := false certsUpdatedFlag := false logrus.Debugf("lastUpdated %v", lastUpdated) if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval { logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval) forceUpdate = true } //read the certs from the dir into tempMap if fetcher.CertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate) if err != nil { logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir) } else { //compare with existing cache if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) { if !forceUpdate { logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir) } else { logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir) } //there is some change, refresh certs fetcher.mu.Lock() fetcher.CertsCache = make(map[string]*config.Certificate) for path, newCert := range fetcher.tempCertsMap { fetcher.CertsCache[path] = newCert logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache 
: %v", newCert.Name) } certsUpdatedFlag = true fetcher.mu.Unlock() } } } //read the cert from the defaultCertDir into tempMap if fetcher.DefaultCertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate) if err != nil { logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir) } else { var tempDefCert *config.Certificate for _, cert := range fetcher.tempCertsMap { tempDefCert = cert } //compare with existing default cert if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) { fetcher.mu.Lock() fetcher.DefaultCert = tempDefCert certsUpdatedFlag = true fetcher.mu.Unlock() } } } if certsUpdatedFlag { //scheduleApplyConfig doOnUpdate("") lastUpdated = time.Now() } if !fetcher.checkIfInitPollDone() { fetcher.setInitPollDone() } logrus.Debug("Done --- LookForCertUpdates poll") time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second) } } } func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error { if f != nil && f.IsDir() { if err != nil { return fmt.Errorf("Error while walking dir [%v]. Error: %v", path, err) } logrus.Debugf("Walking dir %v", path) isCertFound := false isKeyFound := false cert := config.Certificate{} cert.Name = f.Name() files, err := ioutil.ReadDir(path) if err != nil { return err } for _, file := range files { if !file.IsDir() { contentBytes, err := fetcher.evaluatueLinkAndReadFile(path, file.Name()) if err != nil { logrus.Errorf("Error while reading file [%v]. Error: %v", file.Name(), err) } else { if file.Name() == fetcher.CertName { isCertFound = true cert.Cert = string(*contentBytes) } else if file.Name() == fetcher.KeyName { isKeyFound = true cert.Key = string(*contentBytes) } } } } if isCertFound && isKeyFound { fetcher.tempCertsMap[path] = &cert } else if isCertFound || isKeyFound { logrus.Warnf("Skipping incomplete cert found under dir [%v], [isCertFound %v] [isKeyFound %v]", path, isCertFound, isKeyFound) } } return nil } func (fetcher *RCertificateFetcher) evaluatueLinkAndReadFile(relativePath string, fileName string) (*[]byte, error) { filePath := path.Join(relativePath, fileName) absFilePath, err := filepath.Abs(filePath) if err != nil { return nil, fmt.Errorf("Error forming path to file %s, error: %v", filePath, err) } fInfo, err := os.Lstat(absFilePath) if os.IsNotExist(err) { return nil, fmt.Errorf("File %s does not exist", absFilePath) } targetPath := absFilePath if fInfo.Mode()&os.ModeSymlink != 0 { //read symlink targetPath, err := filepath.EvalSymlinks(absFilePath) if err != nil { return nil, fmt.Errorf("File %s pointed by symlink %s does not exist, error: %v", targetPath, absFilePath, err) } } //read target file return fetcher.readFile(targetPath) } func (fetcher *RCertificateFetcher) readFile(filePath string) (*[]byte, error) { contentBytes, err := ioutil.ReadFile(filePath) if err != nil { return nil, fmt.Errorf("Error reading file %s, error: %v", filePath, err) } return &contentBytes, nil }
FetchRancherCertificate
identifier_name
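The masked identifier above is FetchRancherCertificate: guard against an empty id, look the certificate up, fail when nothing comes back, and bundle the leaf certificate with its chain. A rough Python sketch of that flow (the client object and its by_id lookup are hypothetical stand-ins for the go-rancher client):

def fetch_rancher_certificate(client, cert_id):
    if not cert_id:
        return None                          # mirrors the empty-id early return
    cert = client.by_id(cert_id)             # hypothetical lookup call
    if cert is None:
        raise LookupError("Failed to fetch certificate by id [%s]" % cert_id)
    return {
        "name": cert["name"],
        "key": cert["key"],
        # leaf certificate first, then the chain, newline-joined
        "cert": "%s\n%s" % (cert["cert"], cert["cert_chain"]),
    }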
certificate_fetcher.go
package rancher import ( "fmt" "io/ioutil" "os" "path" "path/filepath" "reflect" "sync" "time" "github.com/Sirupsen/logrus" "github.com/rancher/go-rancher-metadata/metadata" "github.com/rancher/go-rancher/v2" "github.com/rancher/lb-controller/config" ) const ( DefaultCertName = "fullchain.pem" DefaultKeyName = "privkey.pem" ) type CertificateFetcher interface { FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error LookForCertUpdates(do func(string)) } type RCertificateFetcher struct { Client *client.RancherClient CertDir string DefaultCertDir string CertsCache map[string]*config.Certificate //cert name (sub dir name) -> cert DefaultCert *config.Certificate tempCertsMap map[string]*config.Certificate //cert name (sub dir name) -> cert updateCheckInterval int forceUpdateInterval float64 mu *sync.RWMutex CertName string KeyName string initPollDone bool initPollMu *sync.RWMutex } func (fetcher *RCertificateFetcher) checkIfInitPollDone() bool { isDone := false fetcher.initPollMu.RLock() isDone = fetcher.initPollDone fetcher.initPollMu.RUnlock() return isDone } func (fetcher *RCertificateFetcher) setInitPollDone() { fetcher.initPollMu.Lock() fetcher.initPollDone = true fetcher.initPollMu.Unlock() } func (fetcher *RCertificateFetcher) FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) { // fetch certificates either from mounted certDir or from cattle certs := []*config.Certificate{} var defaultCert *config.Certificate if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { for { if fetcher.checkIfInitPollDone() { if isDefaultCert { if fetcher.DefaultCertDir != "" { logrus.Debugf("Found defaultCertDir label %v", fetcher.DefaultCertDir) defaultCert = fetcher.ReadDefaultCertificate(fetcher.DefaultCertDir) if defaultCert != nil { certs = append(certs, defaultCert) } } } else { //read all the certificates from the mounted certDir if fetcher.CertDir != "" { logrus.Debugf("Found certDir label %v", fetcher.CertDir) certsFromDir := fetcher.ReadAllCertificatesFromDir(fetcher.CertDir) certs = append(certs, certsFromDir...) } } break } else { logrus.Debugf("Waiting for InitPollDone()") time.Sleep(time.Duration(2) * time.Second) } } } else { if !isDefaultCert { for _, certID := range lbMeta.CertificateIDs { cert, err := fetcher.FetchRancherCertificate(certID) if err != nil { return nil, err } certs = append(certs, cert) } } else { if lbMeta.DefaultCertificateID != "" { var err error defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID) if err != nil { return nil, err } if defaultCert != nil { certs = append(certs, defaultCert) } } } } return certs, nil } func (fetcher *RCertificateFetcher) FetchRancherCertificate(certID string) (*config.Certificate, error) { if certID == "" { return nil, nil } opts := client.NewListOpts() opts.Filters["id"] = certID opts.Filters["removed_null"] = "1" cert, err := fetcher.Client.Certificate.ById(certID) if err != nil { return nil, fmt.Errorf("Coudln't get certificate by id [%s]. 
Error: %#v", certID, err) } if cert == nil { return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID) } certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain) return &config.Certificate{ Name: cert.Name, Key: cert.Key, Cert: certWithChain, }, nil } func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error { opts := client.NewListOpts() opts.Filters["uuid"] = lbSvc.UUID opts.Filters["removed_null"] = "1" lbs, err := fetcher.Client.LoadBalancerService.List(opts) if err != nil { return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err) } if len(lbs.Data) == 0 { logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID) return nil } lb := lbs.Data[0] toUpdate := make(map[string]interface{}) toUpdate["publicEndpoints"] = eps logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps) if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil { return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. Error: %#v", lbSvc.Name, lbSvc.StackName, err) } return nil } func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate { certs := []*config.Certificate{} fetcher.mu.RLock() for _, value := range fetcher.CertsCache { certs = append(certs, value) } fetcher.mu.RUnlock() return certs } func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate { var currentDefCert *config.Certificate fetcher.mu.RLock() currentDefCert = fetcher.DefaultCert fetcher.mu.RUnlock() return currentDefCert } func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) { if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" { lastUpdated := time.Now() for { logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir) forceUpdate := false certsUpdatedFlag := false logrus.Debugf("lastUpdated %v", lastUpdated) if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval { logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval) forceUpdate = true } //read the certs from the dir into tempMap if fetcher.CertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate) if err != nil { logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir) } else { //compare with existing cache if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) { if !forceUpdate { logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir) } else { logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir) } //there is some change, refresh certs fetcher.mu.Lock() fetcher.CertsCache = make(map[string]*config.Certificate) for path, newCert := range fetcher.tempCertsMap { fetcher.CertsCache[path] = newCert logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache : %v", newCert.Name) } certsUpdatedFlag = true fetcher.mu.Unlock() } } } //read the cert from the defaultCertDir into tempMap if fetcher.DefaultCertDir != "" { fetcher.tempCertsMap = make(map[string]*config.Certificate) err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate) if err != nil { 
logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir) } else { var tempDefCert *config.Certificate for _, cert := range fetcher.tempCertsMap { tempDefCert = cert } //compare with existing default cert if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) { fetcher.mu.Lock() fetcher.DefaultCert = tempDefCert certsUpdatedFlag = true fetcher.mu.Unlock() } } } if certsUpdatedFlag { //scheduleApplyConfig doOnUpdate("") lastUpdated = time.Now() } if !fetcher.checkIfInitPollDone() { fetcher.setInitPollDone() } logrus.Debug("Done --- LookForCertUpdates poll") time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second) } } } func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error { if f != nil && f.IsDir() { if err != nil { return fmt.Errorf("Error while walking dir [%v]. Error: %v", path, err) } logrus.Debugf("Walking dir %v", path) isCertFound := false isKeyFound := false cert := config.Certificate{} cert.Name = f.Name() files, err := ioutil.ReadDir(path) if err != nil { return err } for _, file := range files { if !file.IsDir() { contentBytes, err := fetcher.evaluatueLinkAndReadFile(path, file.Name()) if err != nil { logrus.Errorf("Error while reading file [%v]. Error: %v", file.Name(), err) } else { if file.Name() == fetcher.CertName { isCertFound = true cert.Cert = string(*contentBytes) } else if file.Name() == fetcher.KeyName { isKeyFound = true cert.Key = string(*contentBytes) } } } } if isCertFound && isKeyFound { fetcher.tempCertsMap[path] = &cert } else if isCertFound || isKeyFound { logrus.Warnf("Skipping incomplete cert found under dir [%v], [isCertFound %v] [isKeyFound %v]", path, isCertFound, isKeyFound) } } return nil } func (fetcher *RCertificateFetcher) evaluatueLinkAndReadFile(relativePath string, fileName string) (*[]byte, error) { filePath := path.Join(relativePath, fileName) absFilePath, err := filepath.Abs(filePath) if err != nil { return nil, fmt.Errorf("Error forming path to file %s, error: %v", filePath, err) } fInfo, err := os.Lstat(absFilePath) if os.IsNotExist(err) { return nil, fmt.Errorf("File %s does not exist", absFilePath) } targetPath := absFilePath if fInfo.Mode()&os.ModeSymlink != 0 { //read symlink targetPath, err := filepath.EvalSymlinks(absFilePath) if err != nil
} //read target file return fetcher.readFile(targetPath) } func (fetcher *RCertificateFetcher) readFile(filePath string) (*[]byte, error) { contentBytes, err := ioutil.ReadFile(filePath) if err != nil { return nil, fmt.Errorf("Error reading file %s, error: %v", filePath, err) } return &contentBytes, nil }
{ return nil, fmt.Errorf("File %s pointed by symlink %s does not exist, error: %v", targetPath, absFilePath, err) }
conditional_block
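The masked conditional above is the error branch taken when a symlinked cert file cannot be resolved. A minimal Python sketch of the surrounding lstat-then-resolve pattern (mirroring os.Lstat, ModeSymlink, and filepath.EvalSymlinks; strict realpath requires Python 3.10+):

import os
import stat

def evaluate_link_and_read_file(file_path):
    abs_path = os.path.abspath(file_path)
    if not os.path.lexists(abs_path):                 # like os.Lstat + os.IsNotExist
        raise FileNotFoundError("File %s does not exist" % abs_path)
    target = abs_path
    if stat.S_ISLNK(os.lstat(abs_path).st_mode):      # like fInfo.Mode()&os.ModeSymlink
        # resolve the link; strict=True raises for broken symlinks (Python 3.10+)
        target = os.path.realpath(abs_path, strict=True)
    with open(target, "rb") as f:                     # like fetcher.readFile
        return f.read()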
ProteinRNN.py
# Here we import the modules that we will use for the task import numpy as np import math import statistics import tensorflow as tf import string import random import matplotlib.pyplot as plt from tensorflow import keras from tensorflow.keras import layers from google.colab import drive from tensorflow.python.framework import ops from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.models import Model from tensorflow.keras.layers.experimental import preprocessing # We mount our Google Drive that contains the datasets drive.mount('/content/gdrive') ############# the code for this class came from Google's RNN Text Generation page but has been modified to work witht he current RNN ############### class OneStep(tf.keras.Model): def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0): super().__init__() self.temperature = temperature self.model = model self.chars_from_ids = chars_from_ids self.ids_from_chars = ids_from_chars # Create a mask to prevent "[UNK]" from being generated. skip_ids = self.ids_from_chars(['[UNK]'])[:, None] sparse_mask = tf.SparseTensor( # Put a -inf at each bad index. values=[-float('inf')]*len(skip_ids), indices=skip_ids, # Match the shape to the vocabulary dense_shape=[len(ids_from_chars.get_vocabulary())+1]) self.prediction_mask = tf.sparse.to_dense(sparse_mask) #@tf.function def generate_one_step(self, inputs): # Convert strings to token IDs. input_chars = tf.strings.unicode_split(inputs, 'UTF-8') input_ids = self.ids_from_chars(input_chars).to_tensor() # convert the input into one hot tensors input_ids = tf.one_hot(input_ids,21) input_ids = ops.convert_to_tensor(input_ids, dtype=tf.float32) # Run the model. predicted_logits = self.model(inputs=input_ids) # Only use the last prediction. predicted_logits = predicted_logits / self.temperature # Apply the prediction mask: prevent "[UNK]" from being generated. predicted_logits = predicted_logits + self.prediction_mask # Sample the output logits to generate token IDs. predicted_ids = tf.random.categorical(predicted_logits, num_samples=1) # Convert from token ids to characters predicted_chars = self.chars_from_ids(predicted_ids) # Return the characters. return predicted_chars ##### end of class ##### # convert made up sequence to a tensor # line is the temp[0] pass as an argument here def FlipChars(line):
### The function for gathering tests def read_seqV2(sequence): f = open(sequence, 'r') test = [] testlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return test, testlabel # get testing data if count % 5 == 0: # else, put it in a the training set, also known as x_train, and training label set, also known as y_train. temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 50 char long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] while len(seq) < 100: seq.append(0) # grab the labels and convert seq into a one hot encoding of 21 seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] count += 1 #### The function for reading and parsing file def read_seq(sequence): f = open(sequence, 'r') train = [] trainlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return train, trainlabel # training data addin if count % 5 != 0: # training data found temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 50 char long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) while len(seq) < 100: seq.append(0) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] count += 1 # We parse files to get training data seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt') seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt') # We reshape labels to be 2d arrays train_label = np.asarray(train_label).astype('float32').reshape((-1,1)) test_label = np.asarray(test_label).astype('float32').reshape((-1,1)) # We convert labels (y_train and y_test) to tensors train_label = ops.convert_to_tensor(train_label, dtype=tf.float32) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) # We make the training and testing tensors floats instead of ints seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32) seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32) # We create out recurrent neural network (RNN) model = keras.Sequential( [ layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.5), layers.LSTM(128, return_sequences=True, 
kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized layers.BatchNormalization(), layers.Dropout(0.6), layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.7), layers.Dense(21, activation='softmax') # Output layer ] ) """ For extra credit portion - testing lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=1e-2, decay_steps=1000, decay_rate=0.9) optimizer = keras.optimizers.SGD(learning_rate=lr_schedule) Optimizer.apply_gradients( grads_and_vars, name=None, experimental_aggregate_gradients=True ) """ # We compile the model model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) # We get the history of the model to plot stuff historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label)) # save the model #model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128') # We print out a summary of our model model.summary() # We plot results of the training model plt.plot(historyMod.history['accuracy'], label='Training data') plt.plot(historyMod.history['val_accuracy'], label='Validation data') plt.title('Accuracy training vs. Accuracy validation') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # We plot the loss plt.plot(historyMod.history['loss'], label='Training data') plt.plot(historyMod.history['val_loss'], label='Validation data') plt.title('Loss training vs. Loss validation') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # This portion takes care of the perplexity plotting perplexity = tf.exp(historyMod.history['loss']) plt.plot(perplexity) plt.title('Perplexity of RNN') plt.show() ############################################################################ ########################### TASK III ##################################### # proteins for trial protein_seq = "MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRAEKLFNQDVDAAVRGILR" protein_seq2 = "MPPYTVVYFPVRGRCAALRMLLADQGQSWKEEVVTVETWQEGSLKASCLY" protein_seq3 = "KVFERCELARTLKRLGMDGYRGISLANWMCLAKWESGYNTRATNYNAGDR" protein_seq4 = "FNASSGDSKKIVGVFYKANEYATKNPNFLGCVENALGIRDWLESQGHQYI" protein_seq5 = "MDSEVQRDGRILDLIDDAWREDKLPYEDVAIPLNELPEPEQDNGGTTESV" # protein to get vocabulary example_protein = "MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDTVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL" # getting the vocabulary of the protein sequence as well as their associated IDs vocab = sorted(set(example_protein)) ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab), mask_token=None) chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None) # get the one step modelclass initialized so prediction can be performed one_step_model = OneStep(model, chars_from_ids, ids_from_chars) # preparing trials trials = 1 k = 1 i = 0 array_of_proteins = [] array_of_proteins.append(protein_seq) array_of_proteins.append(protein_seq2) array_of_proteins.append(protein_seq3) array_of_proteins.append(protein_seq4) array_of_proteins.append(protein_seq5) #array_of_proteins = np.array(array_of_proteins) # beginning trials while trials < 6: print("\nBeginning trial " + str(trials)) print("===============================================================") 
print("===============================================================\n") ar = array_of_proteins[i] while k != 20: chars = ar[:k] next_char = tf.constant([chars]) result = [] result.append(chars) next_letter = [] for n in range(50-k): next_letter = one_step_model.generate_one_step(next_char) next_letter_np = next_letter.numpy() result.append(next_letter_np[0]) print("When k = " + str(k)) print("-"*len(result)) #k += 1 print("\n-----------Finding matches-----------\n") print("Prediction with seed of " + str(k)) matches = 0 checkMatches = ar[k:] k += 1 for x in range(len(checkMatches)): if checkMatches[x].encode("utf-8") == result[x]: matches += 1 else: continue print(str(matches) + " matches") print("________________________\n") print("\n") # end of for loop and going on to the next rial i += 1 k = 1 trials += 1 print("\n End of trials.")
testlabel = [] test = [] temp = line.split() charList = list(temp[0]) seq = [] for i in range(50): # convert each letter into an int, randomly flip a char or more # these 3 if conditions randomly flip a character letter to find dependency, can comment out any of them for experimentation if i == 15: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i == 25: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i >= 45: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 # get one hot tensor and test label label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) # convert labels to tensors and one hot to a tensor again test_label = np.asarray(testlabel).astype('float32').reshape((-1,1)) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) test = ops.convert_to_tensor(test, dtype=tf.float32) return test, test_label
identifier_body
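The masked body above (FlipChars) encodes a 50-residue sequence as ints, deliberately corrupts a few positions to probe the model's positional dependency, and one-hot encodes to width 21. A compact pure-Python sketch of that encode-and-perturb idea, runnable without TensorFlow (this version clamps every id to 20 for simplicity, whereas the original clamps only the label position):

import random
import string

def encode_and_flip(seq50, flip_positions=(15, 25)):
    ids = []
    for i, ch in enumerate(seq50):
        if i in flip_positions or i >= 45:
            ch = random.choice(string.ascii_uppercase)   # avoids the randint off-by-one
        ids.append(min(ord(ch) - ord("A") + 1, 20))      # clamp into the 20-symbol alphabet
    # one-hot to width 21 (index 0 is left for padding)
    one_hot = [[1.0 if j == v else 0.0 for j in range(21)] for v in ids]
    return ids, one_hot

ids, one_hot = encode_and_flip("MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRAEKLFNQDVDAAVRGILR")
print(len(ids), len(one_hot), len(one_hot[0]))  # 50 ids, 50 rows, width 21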
ProteinRNN.py
# Here we import the modules that we will use for the task import numpy as np import math import statistics import tensorflow as tf import string import random import matplotlib.pyplot as plt from tensorflow import keras from tensorflow.keras import layers from google.colab import drive from tensorflow.python.framework import ops from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.models import Model from tensorflow.keras.layers.experimental import preprocessing # We mount our Google Drive that contains the datasets drive.mount('/content/gdrive') ############# the code for this class came from Google's RNN Text Generation page but has been modified to work witht he current RNN ############### class OneStep(tf.keras.Model): def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0): super().__init__() self.temperature = temperature self.model = model self.chars_from_ids = chars_from_ids self.ids_from_chars = ids_from_chars # Create a mask to prevent "[UNK]" from being generated. skip_ids = self.ids_from_chars(['[UNK]'])[:, None] sparse_mask = tf.SparseTensor( # Put a -inf at each bad index. values=[-float('inf')]*len(skip_ids), indices=skip_ids, # Match the shape to the vocabulary dense_shape=[len(ids_from_chars.get_vocabulary())+1]) self.prediction_mask = tf.sparse.to_dense(sparse_mask) #@tf.function def generate_one_step(self, inputs): # Convert strings to token IDs. input_chars = tf.strings.unicode_split(inputs, 'UTF-8') input_ids = self.ids_from_chars(input_chars).to_tensor() # convert the input into one hot tensors input_ids = tf.one_hot(input_ids,21) input_ids = ops.convert_to_tensor(input_ids, dtype=tf.float32) # Run the model. predicted_logits = self.model(inputs=input_ids) # Only use the last prediction. predicted_logits = predicted_logits / self.temperature # Apply the prediction mask: prevent "[UNK]" from being generated. predicted_logits = predicted_logits + self.prediction_mask # Sample the output logits to generate token IDs. predicted_ids = tf.random.categorical(predicted_logits, num_samples=1) # Convert from token ids to characters predicted_chars = self.chars_from_ids(predicted_ids) # Return the characters. return predicted_chars ##### end of class ##### # convert made up sequence to a tensor # line is the temp[0] pass as an argument here def
(line): testlabel = [] test = [] temp = line.split() charList = list(temp[0]) seq = [] for i in range(50): # convert each letter into an int, randomly flip a char or more # these 3 if conditions randomly flip a character letter to find dependency, can comment out any of them for experimentation if i == 15: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i == 25: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i >= 45: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 # get one hot tensor and test label label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) # convert labels to tensors and one hot to a tensor again test_label = np.asarray(testlabel).astype('float32').reshape((-1,1)) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) test = ops.convert_to_tensor(test, dtype=tf.float32) return test, test_label ### The function for gathering tests def read_seqV2(sequence): f = open(sequence, 'r') test = [] testlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return test, testlabel # get testing data if count % 5 == 0: # put it in the test set (x_test) and the test label set (y_test) temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 100 chars long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] while len(seq) < 100: seq.append(0) # grab the labels and convert seq into a one hot encoding of 21 seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] count += 1 #### The function for reading and parsing file def read_seq(sequence): f = open(sequence, 'r') train = [] trainlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return train, trainlabel # add to the training data if count % 5 != 0: # training data found temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 100 chars long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) # clamp and grab the label before zero-padding, otherwise the label would be the padding 0 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] while len(seq) < 100: seq.append(0) # convert seq into a one hot encoding of 21 seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1]
> 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] count += 1 # We parse files to get training data seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt') seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt') # We reshape labels to be 2d arrays train_label = np.asarray(train_label).astype('float32').reshape((-1,1)) test_label = np.asarray(test_label).astype('float32').reshape((-1,1)) # We convert labels (y_train and y_test) to tensors train_label = ops.convert_to_tensor(train_label, dtype=tf.float32) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) # We make the training and testing tensors floats instead of ints seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32) seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32) # We create out recurrent neural network (RNN) model = keras.Sequential( [ layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.5), layers.LSTM(128, return_sequences=True, kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized layers.BatchNormalization(), layers.Dropout(0.6), layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.7), layers.Dense(21, activation='softmax') # Output layer ] ) """ For extra credit portion - testing lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=1e-2, decay_steps=1000, decay_rate=0.9) optimizer = keras.optimizers.SGD(learning_rate=lr_schedule) Optimizer.apply_gradients( grads_and_vars, name=None, experimental_aggregate_gradients=True ) """ # We compile the model model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) # We get the history of the model to plot stuff historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label)) # save the model #model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128') # We print out a summary of our model model.summary() # We plot results of the training model plt.plot(historyMod.history['accuracy'], label='Training data') plt.plot(historyMod.history['val_accuracy'], label='Validation data') plt.title('Accuracy training vs. Accuracy validation') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # We plot the loss plt.plot(historyMod.history['loss'], label='Training data') plt.plot(historyMod.history['val_loss'], label='Validation data') plt.title('Loss training vs. 
Loss validation') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # This portion takes care of the perplexity plotting perplexity = tf.exp(historyMod.history['loss']) plt.plot(perplexity) plt.title('Perplexity of RNN') plt.show() ############################################################################ ########################### TASK III ##################################### # proteins for trial protein_seq = "MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRAEKLFNQDVDAAVRGILR" protein_seq2 = "MPPYTVVYFPVRGRCAALRMLLADQGQSWKEEVVTVETWQEGSLKASCLY" protein_seq3 = "KVFERCELARTLKRLGMDGYRGISLANWMCLAKWESGYNTRATNYNAGDR" protein_seq4 = "FNASSGDSKKIVGVFYKANEYATKNPNFLGCVENALGIRDWLESQGHQYI" protein_seq5 = "MDSEVQRDGRILDLIDDAWREDKLPYEDVAIPLNELPEPEQDNGGTTESV" # protein to get vocabulary example_protein = "MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDTVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL" # getting the vocabulary of the protein sequence as well as their associated IDs vocab = sorted(set(example_protein)) ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab), mask_token=None) chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None) # get the one step modelclass initialized so prediction can be performed one_step_model = OneStep(model, chars_from_ids, ids_from_chars) # preparing trials trials = 1 k = 1 i = 0 array_of_proteins = [] array_of_proteins.append(protein_seq) array_of_proteins.append(protein_seq2) array_of_proteins.append(protein_seq3) array_of_proteins.append(protein_seq4) array_of_proteins.append(protein_seq5) #array_of_proteins = np.array(array_of_proteins) # beginning trials while trials < 6: print("\nBeginning trial " + str(trials)) print("===============================================================") print("===============================================================\n") ar = array_of_proteins[i] while k != 20: chars = ar[:k] next_char = tf.constant([chars]) result = [] result.append(chars) next_letter = [] for n in range(50-k): next_letter = one_step_model.generate_one_step(next_char) next_letter_np = next_letter.numpy() result.append(next_letter_np[0]) print("When k = " + str(k)) print("-"*len(result)) #k += 1 print("\n-----------Finding matches-----------\n") print("Prediction with seed of " + str(k)) matches = 0 checkMatches = ar[k:] k += 1 for x in range(len(checkMatches)): if checkMatches[x].encode("utf-8") == result[x]: matches += 1 else: continue print(str(matches) + " matches") print("________________________\n") print("\n") # end of for loop and going on to the next rial i += 1 k = 1 trials += 1 print("\n End of trials.")
FlipChars
identifier_name
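The OneStep class in this record generates one character at a time by scaling logits with a temperature and pushing masked ids (the [UNK] token) to -inf before categorical sampling. A minimal NumPy sketch of that sampling step (np.random.choice stands in for tf.random.categorical):

import numpy as np

def sample_next_id(logits, temperature=1.0, masked_ids=()):
    scaled = np.asarray(logits, dtype=np.float64) / temperature
    scaled[list(masked_ids)] = -np.inf                 # e.g. the [UNK] id
    finite_max = np.max(scaled[np.isfinite(scaled)])   # stabilize the softmax
    probs = np.exp(scaled - finite_max)                # exp(-inf) == 0, so masked ids drop out
    probs /= probs.sum()
    return int(np.random.choice(len(probs), p=probs))

logits = [2.0, 1.0, 0.2, -0.5]
print(sample_next_id(logits, temperature=0.5, masked_ids=[0]))  # never samples id 0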
ProteinRNN.py
# Here we import the modules that we will use for the task import numpy as np import math import statistics import tensorflow as tf import string import random import matplotlib.pyplot as plt from tensorflow import keras from tensorflow.keras import layers from google.colab import drive from tensorflow.python.framework import ops from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.models import Model from tensorflow.keras.layers.experimental import preprocessing # We mount our Google Drive that contains the datasets drive.mount('/content/gdrive') ############# the code for this class came from Google's RNN Text Generation page but has been modified to work witht he current RNN ############### class OneStep(tf.keras.Model): def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0): super().__init__() self.temperature = temperature self.model = model self.chars_from_ids = chars_from_ids self.ids_from_chars = ids_from_chars # Create a mask to prevent "[UNK]" from being generated. skip_ids = self.ids_from_chars(['[UNK]'])[:, None] sparse_mask = tf.SparseTensor( # Put a -inf at each bad index. values=[-float('inf')]*len(skip_ids), indices=skip_ids, # Match the shape to the vocabulary dense_shape=[len(ids_from_chars.get_vocabulary())+1]) self.prediction_mask = tf.sparse.to_dense(sparse_mask) #@tf.function def generate_one_step(self, inputs): # Convert strings to token IDs. input_chars = tf.strings.unicode_split(inputs, 'UTF-8') input_ids = self.ids_from_chars(input_chars).to_tensor() # convert the input into one hot tensors input_ids = tf.one_hot(input_ids,21) input_ids = ops.convert_to_tensor(input_ids, dtype=tf.float32) # Run the model. predicted_logits = self.model(inputs=input_ids) # Only use the last prediction. predicted_logits = predicted_logits / self.temperature # Apply the prediction mask: prevent "[UNK]" from being generated. predicted_logits = predicted_logits + self.prediction_mask # Sample the output logits to generate token IDs. predicted_ids = tf.random.categorical(predicted_logits, num_samples=1) # Convert from token ids to characters predicted_chars = self.chars_from_ids(predicted_ids) # Return the characters. 
return predicted_chars ##### end of class ##### # convert made up sequence to a tensor # line is temp[0], passed as an argument here def FlipChars(line): testlabel = [] test = [] temp = line.split() charList = list(temp[0]) seq = [] for i in range(50): # convert each letter into an int, randomly flip a char or more # these 3 if conditions randomly flip a character letter to find dependency; comment any of them out for experimentation if i == 15: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i == 25: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i >= 45: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 # get one hot tensor and test label label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) # convert labels to tensors and one hot to a tensor again test_label = np.asarray(testlabel).astype('float32').reshape((-1,1)) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) test = ops.convert_to_tensor(test, dtype=tf.float32) return test, test_label ### The function for gathering tests def read_seqV2(sequence): test = [] testlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return test, testlabel # get testing data if count % 5 == 0: # put it in the test set, also known as x_test, and test label set, also known as y_test.
temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 100 characters long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] while len(seq) < 100: seq.append(0) # grab the labels and convert seq into a one hot encoding of 21 seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] count += 1 #### The function for reading and parsing file def read_seq(sequence): train = [] trainlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return train, trainlabel # add training data if count % 5 != 0: # training data found temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 100 characters long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) while len(seq) < 100: seq.append(0) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] count += 1 # We parse files to get training data seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt') seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt') # We reshape labels to be 2d arrays train_label = np.asarray(train_label).astype('float32').reshape((-1,1)) test_label = np.asarray(test_label).astype('float32').reshape((-1,1)) # We convert labels (y_train and y_test) to tensors train_label = ops.convert_to_tensor(train_label, dtype=tf.float32) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) # We make the training and testing tensors floats instead of ints seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32) seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32) # We create our recurrent neural network (RNN) model = keras.Sequential( [ layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.5), layers.LSTM(128, return_sequences=True, kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized layers.BatchNormalization(), layers.Dropout(0.6), layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.7), layers.Dense(21, activation='softmax') # Output layer ] ) """ For extra credit portion - testing lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=1e-2, decay_steps=1000,
decay_rate=0.9) optimizer = keras.optimizers.SGD(learning_rate=lr_schedule) Optimizer.apply_gradients( grads_and_vars, name=None, experimental_aggregate_gradients=True ) """ # We compile the model model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) # We get the history of the model to plot stuff historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label)) # save the model #model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128') # We print out a summary of our model model.summary() # We plot results of the training model plt.plot(historyMod.history['accuracy'], label='Training data') plt.plot(historyMod.history['val_accuracy'], label='Validation data') plt.title('Accuracy training vs. Accuracy validation') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # We plot the loss plt.plot(historyMod.history['loss'], label='Training data') plt.plot(historyMod.history['val_loss'], label='Validation data') plt.title('Loss training vs. Loss validation') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # This portion takes care of the perplexity plotting perplexity = tf.exp(historyMod.history['loss']) plt.plot(perplexity) plt.title('Perplexity of RNN') plt.show() ############################################################################ ########################### TASK III ##################################### # proteins for trial protein_seq = "MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRAEKLFNQDVDAAVRGILR" protein_seq2 = "MPPYTVVYFPVRGRCAALRMLLADQGQSWKEEVVTVETWQEGSLKASCLY" protein_seq3 = "KVFERCELARTLKRLGMDGYRGISLANWMCLAKWESGYNTRATNYNAGDR" protein_seq4 = "FNASSGDSKKIVGVFYKANEYATKNPNFLGCVENALGIRDWLESQGHQYI" protein_seq5 = "MDSEVQRDGRILDLIDDAWREDKLPYEDVAIPLNELPEPEQDNGGTTESV" # protein to get vocabulary example_protein = "MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDTVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL" # getting the vocabulary of the protein sequence as well as their associated IDs vocab = sorted(set(example_protein)) ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab), mask_token=None) chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None) # get the one step model class initialized so prediction can be performed one_step_model = OneStep(model, chars_from_ids, ids_from_chars) # preparing trials trials = 1 k = 1 i = 0 array_of_proteins = [] array_of_proteins.append(protein_seq) array_of_proteins.append(protein_seq2) array_of_proteins.append(protein_seq3) array_of_proteins.append(protein_seq4) array_of_proteins.append(protein_seq5) #array_of_proteins = np.array(array_of_proteins) # beginning trials while trials < 6: print("\nBeginning trial " + str(trials)) print("===============================================================") print("===============================================================\n") ar = array_of_proteins[i] while k != 20: chars = ar[:k] next_char = tf.constant([chars]) result = [] result.append(chars) next_letter = [] for n in range(50-k): next_letter = one_step_model.generate_one_step(next_char) next_letter_np = next_letter.numpy() result.append(next_letter_np[0]) print("When k = " + str(k)) print("-"*len(result)) #k += 1 print("\n-----------Finding matches-----------\n") print("Prediction with seed of " + str(k)) matches = 0
checkMatches = ar[k:] k += 1 for x in range(len(checkMatches)): if checkMatches[x].encode("utf-8") == result[x]: matches += 1 else:
print(str(matches) + " matches") print("________________________\n") print("\n") # end of for loop and going on to the next trial i += 1 k = 1 trials += 1 print("\n End of trials.")
continue
conditional_block
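The perplexity plot in these records is just the exponential of the per-epoch cross-entropy loss (`tf.exp(historyMod.history['loss'])`). A tiny hedged sketch of that relationship, with made-up loss values:

```python
# Perplexity from per-epoch cross-entropy loss, as in the plotting code above.
# The loss values below are made up for illustration.
import numpy as np

losses = [2.9, 2.5, 2.2]     # e.g. historyMod.history['loss'], one value per epoch
perplexity = np.exp(losses)  # perplexity = e^H for cross-entropy H measured in nats
print(perplexity)            # approximately [18.17 12.18  9.03]
```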
ProteinRNN.py
# Here we import the modules that we will use for the task import numpy as np import math import statistics import tensorflow as tf import string import random import matplotlib.pyplot as plt from tensorflow import keras from tensorflow.keras import layers from google.colab import drive from tensorflow.python.framework import ops from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.models import Model from tensorflow.keras.layers.experimental import preprocessing # We mount our Google Drive that contains the datasets drive.mount('/content/gdrive') ############# the code for this class came from Google's RNN Text Generation page but has been modified to work with the current RNN ############### class OneStep(tf.keras.Model): def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0): super().__init__() self.temperature = temperature self.model = model self.chars_from_ids = chars_from_ids self.ids_from_chars = ids_from_chars # Create a mask to prevent "[UNK]" from being generated. skip_ids = self.ids_from_chars(['[UNK]'])[:, None] sparse_mask = tf.SparseTensor( # Put a -inf at each bad index. values=[-float('inf')]*len(skip_ids), indices=skip_ids, # Match the shape to the vocabulary dense_shape=[len(ids_from_chars.get_vocabulary())+1]) self.prediction_mask = tf.sparse.to_dense(sparse_mask) #@tf.function def generate_one_step(self, inputs): # Convert strings to token IDs. input_chars = tf.strings.unicode_split(inputs, 'UTF-8') input_ids = self.ids_from_chars(input_chars).to_tensor() # convert the input into one hot tensors input_ids = tf.one_hot(input_ids,21) input_ids = ops.convert_to_tensor(input_ids, dtype=tf.float32) # Run the model. predicted_logits = self.model(inputs=input_ids) # Only use the last prediction. predicted_logits = predicted_logits / self.temperature # Apply the prediction mask: prevent "[UNK]" from being generated. predicted_logits = predicted_logits + self.prediction_mask # Sample the output logits to generate token IDs. predicted_ids = tf.random.categorical(predicted_logits, num_samples=1) # Convert from token ids to characters predicted_chars = self.chars_from_ids(predicted_ids) # Return the characters. return predicted_chars ##### end of class #####
# line is temp[0], passed as an argument here def FlipChars(line): testlabel = [] test = [] temp = line.split() charList = list(temp[0]) seq = [] for i in range(50): # convert each letter into an int, randomly flip a char or more # these 3 if conditions randomly flip a character letter to find dependency; comment any of them out for experimentation if i == 15: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i == 25: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue if i >= 45: seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1) continue seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 # get one hot tensor and test label label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) # convert labels to tensors and one hot to a tensor again test_label = np.asarray(testlabel).astype('float32').reshape((-1,1)) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) test = ops.convert_to_tensor(test, dtype=tf.float32) return test, test_label ### The function for gathering tests def read_seqV2(sequence): test = [] testlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return test, testlabel # get testing data if count % 5 == 0: # put it in the test set, also known as x_test, and test label set, also known as y_test. temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 100 characters long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] while len(seq) < 100: seq.append(0) # grab the labels and convert seq into a one hot encoding of 21 seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) test.append(seq[:len(seq)-1]) testlabel.append(label) seq = [] count += 1 #### The function for reading and parsing file def read_seq(sequence): train = [] trainlabel = [] # Reading file and extracting paths and labels with open(sequence, 'r') as File: infoFile = File.readlines() # Reading all the lines from File count = 0 for line in infoFile: #Reading line-by-line if count == 44011: return train, trainlabel # add training data if count % 5 != 0: # training data found temp = line.split() charList = list(temp[0]) seq = [] # if for some reason the protein sequence is less than 100 characters long if len(charList) < 100: for i in range(len(charList)): seq.append(ord(charList[i]) - ord('A') + 1) while len(seq) < 100: seq.append(0) # grab the labels and convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] else: for i in range(100): # convert each letter into an int seq.append(ord(charList[i]) - ord('A') + 1) # grab the labels and
convert seq into a one hot encoding of 21 if seq[len(seq)-1] > 20: seq[len(seq)-1] = 20 label = seq[len(seq)-1] seq = tf.one_hot(seq,21) train.append(seq[:len(seq)-1]) trainlabel.append(label) seq = [] count += 1 # We parse files to get training data seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt') seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt') # We reshape labels to be 2d arrays train_label = np.asarray(train_label).astype('float32').reshape((-1,1)) test_label = np.asarray(test_label).astype('float32').reshape((-1,1)) # We convert labels (y_train and y_test) to tensors train_label = ops.convert_to_tensor(train_label, dtype=tf.float32) test_label = ops.convert_to_tensor(test_label, dtype=tf.float32) # We make the training and testing tensors floats instead of ints seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32) seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32) # We create our recurrent neural network (RNN) model = keras.Sequential( [ layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.5), layers.LSTM(128, return_sequences=True, kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized layers.BatchNormalization(), layers.Dropout(0.6), layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer layers.BatchNormalization(), layers.Dropout(0.7), layers.Dense(21, activation='softmax') # Output layer ] ) """ For extra credit portion - testing lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=1e-2, decay_steps=1000, decay_rate=0.9) optimizer = keras.optimizers.SGD(learning_rate=lr_schedule) Optimizer.apply_gradients( grads_and_vars, name=None, experimental_aggregate_gradients=True ) """ # We compile the model model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) # We get the history of the model to plot stuff historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label)) # save the model #model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128') # We print out a summary of our model model.summary() # We plot results of the training model plt.plot(historyMod.history['accuracy'], label='Training data') plt.plot(historyMod.history['val_accuracy'], label='Validation data') plt.title('Accuracy training vs. Accuracy validation') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # We plot the loss plt.plot(historyMod.history['loss'], label='Training data') plt.plot(historyMod.history['val_loss'], label='Validation data') plt.title('Loss training vs. 
Loss validation') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(loc="upper left") plt.show() # This portion takes care of the perplexity plotting perplexity = tf.exp(historyMod.history['loss']) plt.plot(perplexity) plt.title('Perplexity of RNN') plt.show() ############################################################################ ########################### TASK III ##################################### # proteins for trial protein_seq = "MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRAEKLFNQDVDAAVRGILR" protein_seq2 = "MPPYTVVYFPVRGRCAALRMLLADQGQSWKEEVVTVETWQEGSLKASCLY" protein_seq3 = "KVFERCELARTLKRLGMDGYRGISLANWMCLAKWESGYNTRATNYNAGDR" protein_seq4 = "FNASSGDSKKIVGVFYKANEYATKNPNFLGCVENALGIRDWLESQGHQYI" protein_seq5 = "MDSEVQRDGRILDLIDDAWREDKLPYEDVAIPLNELPEPEQDNGGTTESV" # protein to get vocabulary example_protein = "MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDTVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL" # getting the vocabulary of the protein sequence as well as their associated IDs vocab = sorted(set(example_protein)) ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab), mask_token=None) chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None) # get the one step model class initialized so prediction can be performed one_step_model = OneStep(model, chars_from_ids, ids_from_chars) # preparing trials trials = 1 k = 1 i = 0 array_of_proteins = [] array_of_proteins.append(protein_seq) array_of_proteins.append(protein_seq2) array_of_proteins.append(protein_seq3) array_of_proteins.append(protein_seq4) array_of_proteins.append(protein_seq5) #array_of_proteins = np.array(array_of_proteins) # beginning trials while trials < 6: print("\nBeginning trial " + str(trials)) print("===============================================================") print("===============================================================\n") ar = array_of_proteins[i] while k != 20: chars = ar[:k] next_char = tf.constant([chars]) result = [] result.append(chars) next_letter = [] for n in range(50-k): next_letter = one_step_model.generate_one_step(next_char) next_letter_np = next_letter.numpy() result.append(next_letter_np[0]) print("When k = " + str(k)) print("-"*len(result)) #k += 1 print("\n-----------Finding matches-----------\n") print("Prediction with seed of " + str(k)) matches = 0 checkMatches = ar[k:] k += 1 for x in range(len(checkMatches)): if checkMatches[x].encode("utf-8") == result[x]: matches += 1 else: continue print(str(matches) + " matches") print("________________________\n") print("\n") # end of for loop and going on to the next trial i += 1 k = 1 trials += 1 print("\n End of trials.")
# convert made up sequence to a tensor
random_line_split
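TASK III in the records above seeds the generator with the first k letters of a known protein and counts how many generated positions match the original. A hedged restatement of that loop follows; `generate_next` is a placeholder for `one_step_model.generate_one_step`, and unlike the source this version aligns prediction and ground truth strictly past the seed.

```python
# Sketch of the seed-and-match evaluation in TASK III.
import random

def generate_next(seed: str) -> str:
    # Placeholder predictor standing in for one_step_model.generate_one_step.
    return random.choice("ACDEFGHIKLMNPQRSTVWY")

protein = "MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRAEKLFNQDVDAAVRGILR"
k = 5                                   # seed length
predicted = list(protein[:k])
for _ in range(len(protein) - k):
    predicted.append(generate_next("".join(predicted)))

# Count matching positions past the seed (the seed itself always matches).
matches = sum(p == t for p, t in zip(predicted[k:], protein[k:]))
print(f"seed={k}: {matches} matches out of {len(protein) - k}")
```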
accounts.go
// Copyright © 2018-2019 Apollo Technologies Pte. Ltd. All Rights Reserved. package accounts import ( "context" "fmt" "math/big" "net/http" "strconv" "github.com/HiNounou029/nounouchain/api/utils" "github.com/HiNounou029/nounouchain/polo" "github.com/HiNounou029/nounouchain/common/xenv" "github.com/HiNounou029/nounouchain/core/block" "github.com/HiNounou029/nounouchain/core/chain" "github.com/HiNounou029/nounouchain/core/tx" "github.com/HiNounou029/nounouchain/state" "github.com/HiNounou029/nounouchain/vm/runtime" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/HiNounou029/nounouchain/poloclient" ) var ( balanceSig = "0x1d7976f3" //balanceOf(address) ) type Accounts struct { chain *chain.Chain stateCreator *state.Creator callGasLimit uint64 } func New(chain *chain.Chain, stateCreator *state.Creator, callGasLimit uint64) *Accounts { return &Accounts{ chain, stateCreator, callGasLimit, } } func (a *Accounts) getCode(addr polo.Address, stateRoot polo.Bytes32) ([]byte, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return nil, err } code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return code, nil } func (a *Accounts) handleGetCode(w http.ResponseWriter, req *http.Request) error { hexAddr := mux.Vars(req)["address"] addr, err := polo.ParseAddress(hexAddr) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } code, err := a.getCode(addr, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"code": hexutil.Encode(code)}) } func (a *Accounts) getAccount(addr polo.Address, header *block.Header) (*Account, error) { state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } b := state.GetBalance(addr) code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return &Account{ Balance: math.HexOrDecimal256(*b), HasCode: len(code) != 0, }, nil } func (a *Accounts) getStorage(addr polo.Address, key polo.Bytes32, stateRoot polo.Bytes32) (polo.Bytes32, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return polo.Bytes32{}, err } storage := state.GetStorage(addr, key) if err := state.Err(); err != nil { return polo.Bytes32{}, err } return storage, nil } func (a *Accounts) handleGetAccount(w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } tokenAddress := req.URL.Query().Get("tokenAddress") if len(tokenAddress) > 0 { // get the ERC20 token balance contractAddress, err := polo.ParseAddress(tokenAddress) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } callData := &CallData{} hexStr := balanceSig data := poloclient.FromHex(hexStr) bytesAddr := addr.Bytes() for i := 0; i < 32 - len(bytesAddr); i++ { data = append(data, 0) } for _, v := range bytesAddr { data = append(data, byte(v)) } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: &contractAddress, Value: callData.Value, Data: hexutil.Encode(data), }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil {
return err } decodeData, err := hexutil.Decode(results[0].Data) if err != nil { return err } if len(decodeData) > 32 { return fmt.Errorf("decodeData error") } b := big.NewInt(0) b.SetBytes(decodeData) acc := &Account{ Balance: math.HexOrDecimal256(*b), HasCode: false, } return utils.WriteTo(w, req, acc) } else { acc, err := a.getAccount(addr, h) if err != nil { return err } return utils.WriteTo(w, req, acc) } } func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } key, err := polo.ParseBytes32(mux.Vars(req)["key"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "key")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } storage, err := a.getStorage(addr, key, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"value": storage.String()}) } func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error { callData := &CallData{} if err := utils.ParseJSON(req.Body, &callData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } var addr *polo.Address if mux.Vars(req)["address"] != "" { address, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } addr = &address } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: addr, Value: callData.Value, Data: callData.Data, }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results[0]) } func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error { batchCallData := &BatchCallData{} if err := utils.ParseJSON(req.Body, &batchCallData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results) } func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) { gas, gasPrice, caller, clauses, err := a.handleBatchCallData(batchCallData) if err != nil { return nil, err } state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } signer, _ := header.Signer() rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state,
Beneficiary: header.Beneficiary(), Signer: signer, Number: header.Number(), Time: header.Timestamp(), GasLimit: header.GasLimit(), TotalScore: header.TotalScore()}) results = make(BatchCallResults, 0) vmout := make(chan *runtime.Output, 1) for i, clause := range clauses { exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{ Origin: *caller, GasPrice: gasPrice}) go func() { out, _ := exec() vmout <- out }() select { case <-ctx.Done(): interrupt() return nil, ctx.Err() case out := <-vmout: if err := rt.Seeker().Err(); err != nil { return nil, err } if err := state.Err(); err != nil { return nil, err } results = append(results, convertCallResultWithInputGas(out, gas)) if out.VMErr != nil { return results, nil } gas = out.LeftOverGas } } return results, nil } func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) { if batchCallData.Gas > a.callGasLimit { return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit")) } else if batchCallData.Gas == 0 { gas = a.callGasLimit } else { gas = batchCallData.Gas } if batchCallData.GasPrice == nil { gasPrice = new(big.Int) } else { gasPrice = (*big.Int)(batchCallData.GasPrice) } if batchCallData.Caller == nil { caller = &polo.Address{} } else { caller = batchCallData.Caller } clauses = make([]*tx.Clause, len(batchCallData.Clauses)) for i, c := range batchCallData.Clauses { var value *big.Int if c.Value == nil { value = new(big.Int) } else { value = (*big.Int)(c.Value) } var data []byte if c.Data != "" { data, err = hexutil.Decode(c.Data) if err != nil { err = utils.BadRequest(errors.WithMessage(err, fmt.Sprintf("data[%d]", i))) return } } clauses[i] = tx.NewClause(c.To).WithData(data).WithValue(value) } return } func (a *Accounts) handleRevision(revision string) (*block.Header, error) { if revision == "" || revision == "best" { return a.chain.BestBlock().Header(), nil } if len(revision) == 66 || len(revision) == 64 { blockID, err := polo.ParseBytes32(revision) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } h, err := a.chain.GetBlockHeader(blockID) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } n, err := strconv.ParseUint(revision, 0, 0) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } if n > math.MaxUint32 { return nil, utils.BadRequest(errors.WithMessage(errors.New("block number out of max uint32"), "revision")) } h, err := a.chain.GetTrunkBlockHeader(uint32(n)) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } func (a *Accounts) Mount(root *mux.Router, pathPrefix string) { sub := root.PathPrefix(pathPrefix).Subrouter() sub.Path("/*").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallBatchCode)) sub.Path("/{address}").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetAccount)) sub.Path("/{address}/code").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetCode)) sub.Path("/{address}/storage/{key}").Methods("GET").HandlerFunc(utils.WrapHandlerFunc(a.handleGetStorage)) sub.Path("").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) sub.Path("/{address}").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) }
&xenv.BlockContext{
random_line_split
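A short sketch of the calldata handleGetAccount assembles for the token-balance call: the 4-byte selector from `balanceSig`, then the 20-byte account address left-padded with zero bytes to 32 (the Go loop appends the padding before the address bytes). Only the selector constant comes from the source; the address below is a placeholder.

```python
# Sketch of the ABI calldata assembled in handleGetAccount (placeholder address).
selector = bytes.fromhex("1d7976f3")           # balanceSig from the source, sans 0x
addr = bytes(20)                               # placeholder 20-byte account address
calldata = selector + addr.rjust(32, b"\x00")  # 12 zero bytes, then the address
print(calldata.hex(), len(calldata))           # 36 bytes: 4 selector + 32 argument
```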
accounts.go
// Copyright © 2018-2019 Apollo Technologies Pte. Ltd. All Rights Reserved. package accounts import ( "context" "fmt" "math/big" "net/http" "strconv" "github.com/HiNounou029/nounouchain/api/utils" "github.com/HiNounou029/nounouchain/polo" "github.com/HiNounou029/nounouchain/common/xenv" "github.com/HiNounou029/nounouchain/core/block" "github.com/HiNounou029/nounouchain/core/chain" "github.com/HiNounou029/nounouchain/core/tx" "github.com/HiNounou029/nounouchain/state" "github.com/HiNounou029/nounouchain/vm/runtime" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/HiNounou029/nounouchain/poloclient" ) var ( balanceSig = "0x1d7976f3" //balanceOf(address) ) type Accounts struct { chain *chain.Chain stateCreator *state.Creator callGasLimit uint64 } func New(chain *chain.Chain, stateCreator *state.Creator, callGasLimit uint64) *Accounts { return &Accounts{ chain, stateCreator, callGasLimit, } } func (a *Accounts) getCode(addr polo.Address, stateRoot polo.Bytes32) ([]byte, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return nil, err } code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return code, nil } func (a *Accounts) handleGetCode(w http.ResponseWriter, req *http.Request) error { hexAddr := mux.Vars(req)["address"] addr, err := polo.ParseAddress(hexAddr) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } code, err := a.getCode(addr, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"code": hexutil.Encode(code)}) } func (a *Accounts) getAccount(addr polo.Address, header *block.Header) (*Account, error) { state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } b := state.GetBalance(addr) code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return &Account{ Balance: math.HexOrDecimal256(*b), HasCode: len(code) != 0, }, nil } func (a *Accounts) getStorage(addr polo.Address, key polo.Bytes32, stateRoot polo.Bytes32) (polo.Bytes32, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return polo.Bytes32{}, err } storage := state.GetStorage(addr, key) if err := state.Err(); err != nil { return polo.Bytes32{}, err } return storage, nil } func (a *Accounts) handleGetAccount(w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } tokenAddress := req.URL.Query().Get("tokenAddress") if len(tokenAddress) > 0 { // get the ERC20 token balance contractAddress, err := polo.ParseAddress(tokenAddress) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } callData := &CallData{} hexStr := balanceSig data := poloclient.FromHex(hexStr) bytesAddr := addr.Bytes() for i := 0; i < 32 - len(bytesAddr); i++ { data = append(data, 0) } for _, v := range bytesAddr { data = append(data, byte(v)) } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: &contractAddress, Value: callData.Value, Data: hexutil.Encode(data), }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil {
return err } decodeData, err := hexutil.Decode(results[0].Data) if err != nil { return err } if len(decodeData) > 32 { return fmt.Errorf("decodeData error") } b := big.NewInt(0) b.SetBytes(decodeData) acc := &Account{ Balance: math.HexOrDecimal256(*b), HasCode: false, } return utils.WriteTo(w, req, acc) } else { acc, err := a.getAccount(addr, h) if err != nil { return err } return utils.WriteTo(w, req, acc) } } func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } key, err := polo.ParseBytes32(mux.Vars(req)["key"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "key")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } storage, err := a.getStorage(addr, key, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"value": storage.String()}) } func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error { callData := &CallData{} if err := utils.ParseJSON(req.Body, &callData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } var addr *polo.Address if mux.Vars(req)["address"] != "" { address, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } addr = &address } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: addr, Value: callData.Value, Data: callData.Data, }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results[0]) } func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error { batchCallData := &BatchCallData{} if err := utils.ParseJSON(req.Body, &batchCallData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results) } func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) { gas, gasPrice, caller, clauses, err := a.handleBatchCallData(batchCallData) if err != nil { return nil, err } state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } signer, _ := header.Signer() rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state, &xenv.BlockContext{ Beneficiary: header.Beneficiary(), Signer: signer, Number: header.Number(), Time: header.Timestamp(), GasLimit: header.GasLimit(), TotalScore: header.TotalScore()}) results = make(BatchCallResults, 0) vmout := make(chan *runtime.Output, 1) for i, clause := range clauses { exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{ Origin: *caller, GasPrice: gasPrice}) go func() { out, _ := exec() vmout <- out }() select { case <-ctx.Done(): interrupt() return nil, ctx.Err() case out := <-vmout: if err := rt.Seeker().Err(); err != nil { return nil, err } if err := state.Err(); err != nil { return nil, err } results = append(results, convertCallResultWithInputGas(out, gas)) if out.VMErr != nil { return results, nil } gas = 
out.LeftOverGas } } return results, nil } func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) { if batchCallData.Gas > a.callGasLimit { return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit")) } else if batchCallData.Gas == 0 { gas = a.callGasLimit } else { gas = batchCallData.Gas } if batchCallData.GasPrice == nil { gasPrice = new(big.Int) } else { gasPrice = (*big.Int)(batchCallData.GasPrice) } if batchCallData.Caller == nil { caller = &polo.Address{} } else { caller = batchCallData.Caller } clauses = make([]*tx.Clause, len(batchCallData.Clauses)) for i, c := range batchCallData.Clauses { var value *big.Int if c.Value == nil { value = new(big.Int) } else { value = (*big.Int)(c.Value) } var data []byte if c.Data != "" { data, err = hexutil.Decode(c.Data) if err != nil { err = utils.BadRequest(errors.WithMessage(err, fmt.Sprintf("data[%d]", i))) return } } clauses[i] = tx.NewClause(c.To).WithData(data).WithValue(value) } return } func (a *Accounts) handleRevision(revision string) (*block.Header, error) { if
c (a *Accounts) Mount(root *mux.Router, pathPrefix string) { sub := root.PathPrefix(pathPrefix).Subrouter() sub.Path("/*").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallBatchCode)) sub.Path("/{address}").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetAccount)) sub.Path("/{address}/code").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetCode)) sub.Path("/{address}/storage/{key}").Methods("GET").HandlerFunc(utils.WrapHandlerFunc(a.handleGetStorage)) sub.Path("").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) sub.Path("/{address}").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) }
revision == "" || revision == "best" { return a.chain.BestBlock().Header(), nil } if len(revision) == 66 || len(revision) == 64 { blockID, err := polo.ParseBytes32(revision) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } h, err := a.chain.GetBlockHeader(blockID) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } n, err := strconv.ParseUint(revision, 0, 0) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } if n > math.MaxUint32 { return nil, utils.BadRequest(errors.WithMessage(errors.New("block number out of max uint32"), "revision")) } h, err := a.chain.GetTrunkBlockHeader(uint32(n)) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } fun
identifier_body
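Given the routes registered in Mount, a hypothetical client session could look like the following; the base URL and port are assumptions, not taken from the source.

```python
# Hypothetical calls against the routes registered in Mount (base URL assumed).
import requests

BASE = "http://localhost:8669/accounts"  # assumed node address, not from the source
addr = "0x" + "00" * 20                  # placeholder account address

# GET /accounts/{address}?revision=best -> the account's balance and hasCode flag
print(requests.get(f"{BASE}/{addr}", params={"revision": "best"}).json())

# Supplying tokenAddress makes the handler return the ERC20 balance instead
token = "0x" + "11" * 20                 # placeholder token contract address
print(requests.get(f"{BASE}/{addr}", params={"tokenAddress": token}).json())
```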
accounts.go
// Copyright © 2018-2019 Apollo Technologies Pte. Ltd. All Rights Reserved. package accounts import ( "context" "fmt" "math/big" "net/http" "strconv" "github.com/HiNounou029/nounouchain/api/utils" "github.com/HiNounou029/nounouchain/polo" "github.com/HiNounou029/nounouchain/common/xenv" "github.com/HiNounou029/nounouchain/core/block" "github.com/HiNounou029/nounouchain/core/chain" "github.com/HiNounou029/nounouchain/core/tx" "github.com/HiNounou029/nounouchain/state" "github.com/HiNounou029/nounouchain/vm/runtime" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/HiNounou029/nounouchain/poloclient" ) var ( balanceSig = "0x1d7976f3" //balanceOf(address) ) type Accounts struct { chain *chain.Chain stateCreator *state.Creator callGasLimit uint64 } func New(chain *chain.Chain, stateCreator *state.Creator, callGasLimit uint64) *Accounts { return &Accounts{ chain, stateCreator, callGasLimit, } } func (a *Accounts) getCode(addr polo.Address, stateRoot polo.Bytes32) ([]byte, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return nil, err } code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return code, nil } func (a *Accounts) handleGetCode(w http.ResponseWriter, req *http.Request) error { hexAddr := mux.Vars(req)["address"] addr, err := polo.ParseAddress(hexAddr) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } code, err := a.getCode(addr, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"code": hexutil.Encode(code)}) } func (a *Accounts) getAccount(addr polo.Address, header *block.Header) (*Account, error) { state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } b := state.GetBalance(addr) code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return &Account{ Balance: math.HexOrDecimal256(*b), HasCode: len(code) != 0, }, nil } func (a *Accounts) getStorage(addr polo.Address, key polo.Bytes32, stateRoot polo.Bytes32) (polo.Bytes32, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return polo.Bytes32{}, err } storage := state.GetStorage(addr, key) if err := state.Err(); err != nil { return polo.Bytes32{}, err } return storage, nil } func (a *Accounts) handleGetAccount(w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } tokenAddress := req.URL.Query().Get("tokenAddress") if len(tokenAddress) > 0 { // get the ERC20 token balance contractAddress, err := polo.ParseAddress(tokenAddress) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } callData := &CallData{} hexStr := balanceSig data := poloclient.FromHex(hexStr) bytesAddr := addr.Bytes() for i := 0; i < 32 - len(bytesAddr); i++ { data = append(data, 0) } for _, v := range bytesAddr { data = append(data, byte(v)) } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: &contractAddress, Value: callData.Value, Data: hexutil.Encode(data), }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil {
return err } decodeData, err := hexutil.Decode(results[0].Data) if err != nil { return err } if len(decodeData) > 32 { return fmt.Errorf("decodeData error") } b := big.NewInt(0) b.SetBytes(decodeData) acc := &Account{ Balance: math.HexOrDecimal256(*b), HasCode: false, } return utils.WriteTo(w, req, acc) } else { acc, err := a.getAccount(addr, h) if err != nil { return err } return utils.WriteTo(w, req, acc) } } func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } key, err := polo.ParseBytes32(mux.Vars(req)["key"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "key")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } storage, err := a.getStorage(addr, key, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"value": storage.String()}) } func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error { callData := &CallData{} if err := utils.ParseJSON(req.Body, &callData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } var addr *polo.Address if mux.Vars(req)["address"] != "" { address, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } addr = &address } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: addr, Value: callData.Value, Data: callData.Data, }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results[0]) } func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error { batchCallData := &BatchCallData{} if err := utils.ParseJSON(req.Body, &batchCallData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results) } func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) { gas, gasPrice, caller, clauses, err := a.handleBatchCallData(batchCallData) if err != nil { return nil, err } state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } signer, _ := header.Signer() rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state, &xenv.BlockContext{ Beneficiary: header.Beneficiary(), Signer: signer, Number: header.Number(), Time: header.Timestamp(), GasLimit: header.GasLimit(), TotalScore: header.TotalScore()}) results = make(BatchCallResults, 0) vmout := make(chan *runtime.Output, 1) for i, clause := range clauses { exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{ Origin: *caller, GasPrice: gasPrice}) go func() { out, _ := exec() vmout <- out }() select { case <-ctx.Done(): interrupt() return nil, ctx.Err() case out := <-vmout: if err := rt.Seeker().Err(); err != nil { return nil, err } if err := state.Err(); err != nil {
esults = append(results, convertCallResultWithInputGas(out, gas)) if out.VMErr != nil { return results, nil } gas = out.LeftOverGas } } return results, nil } func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) { if batchCallData.Gas > a.callGasLimit { return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit")) } else if batchCallData.Gas == 0 { gas = a.callGasLimit } else { gas = batchCallData.Gas } if batchCallData.GasPrice == nil { gasPrice = new(big.Int) } else { gasPrice = (*big.Int)(batchCallData.GasPrice) } if batchCallData.Caller == nil { caller = &polo.Address{} } else { caller = batchCallData.Caller } clauses = make([]*tx.Clause, len(batchCallData.Clauses)) for i, c := range batchCallData.Clauses { var value *big.Int if c.Value == nil { value = new(big.Int) } else { value = (*big.Int)(c.Value) } var data []byte if c.Data != "" { data, err = hexutil.Decode(c.Data) if err != nil { err = utils.BadRequest(errors.WithMessage(err, fmt.Sprintf("data[%d]", i))) return } } clauses[i] = tx.NewClause(c.To).WithData(data).WithValue(value) } return } func (a *Accounts) handleRevision(revision string) (*block.Header, error) { if revision == "" || revision == "best" { return a.chain.BestBlock().Header(), nil } if len(revision) == 66 || len(revision) == 64 { blockID, err := polo.ParseBytes32(revision) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } h, err := a.chain.GetBlockHeader(blockID) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } n, err := strconv.ParseUint(revision, 0, 0) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } if n > math.MaxUint32 { return nil, utils.BadRequest(errors.WithMessage(errors.New("block number out of max uint32"), "revision")) } h, err := a.chain.GetTrunkBlockHeader(uint32(n)) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } func (a *Accounts) Mount(root *mux.Router, pathPrefix string) { sub := root.PathPrefix(pathPrefix).Subrouter() sub.Path("/*").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallBatchCode)) sub.Path("/{address}").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetAccount)) sub.Path("/{address}/code").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetCode)) sub.Path("/{address}/storage/{key}").Methods("GET").HandlerFunc(utils.WrapHandlerFunc(a.handleGetStorage)) sub.Path("").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) sub.Path("/{address}").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) }
return nil, err } r
conditional_block
accounts.go
// Copyright © 2018-2019 Apollo Technologies Pte. Ltd. All Rights Reserved. package accounts import ( "context" "fmt" "math/big" "net/http" "strconv" "github.com/HiNounou029/nounouchain/api/utils" "github.com/HiNounou029/nounouchain/polo" "github.com/HiNounou029/nounouchain/common/xenv" "github.com/HiNounou029/nounouchain/core/block" "github.com/HiNounou029/nounouchain/core/chain" "github.com/HiNounou029/nounouchain/core/tx" "github.com/HiNounou029/nounouchain/state" "github.com/HiNounou029/nounouchain/vm/runtime" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/HiNounou029/nounouchain/poloclient" ) var ( balanceSig = "0x1d7976f3" //balanceOf(address) ) type Accounts struct { chain *chain.Chain stateCreator *state.Creator callGasLimit uint64 } func New(chain *chain.Chain, stateCreator *state.Creator, callGasLimit uint64) *Accounts { return &Accounts{ chain, stateCreator, callGasLimit, } } func (a *Accounts) getCode(addr polo.Address, stateRoot polo.Bytes32) ([]byte, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return nil, err } code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return code, nil } func (a *Accounts) handleGetCode(w http.ResponseWriter, req *http.Request) error { hexAddr := mux.Vars(req)["address"] addr, err := polo.ParseAddress(hexAddr) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } code, err := a.getCode(addr, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"code": hexutil.Encode(code)}) } func (a *Accounts) getAccount(addr polo.Address, header *block.Header) (*Account, error) { state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } b := state.GetBalance(addr) code := state.GetCode(addr) if err := state.Err(); err != nil { return nil, err } return &Account{ Balance: math.HexOrDecimal256(*b), HasCode: len(code) != 0, }, nil } func (a *Accounts) getStorage(addr polo.Address, key polo.Bytes32, stateRoot polo.Bytes32) (polo.Bytes32, error) { state, err := a.stateCreator.NewState(stateRoot) if err != nil { return polo.Bytes32{}, err } storage := state.GetStorage(addr, key) if err := state.Err(); err != nil { return polo.Bytes32{}, err } return storage, nil } func (a *Accounts) h
w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } tokenAddress := req.URL.Query().Get("tokenAddress") if len(tokenAddress) > 0 { // get the ERC20 token balance contractAddress, err := polo.ParseAddress(tokenAddress) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } callData := &CallData{} hexStr := balanceSig data := poloclient.FromHex(hexStr) bytesAddr := addr.Bytes() for i := 0; i < 32 - len(bytesAddr); i++ { data = append(data, 0) } for _, v := range bytesAddr { data = append(data, byte(v)) } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: &contractAddress, Value: callData.Value, Data: hexutil.Encode(data), }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } decodeData, err := hexutil.Decode(results[0].Data) if err != nil { return err } if len(decodeData) > 32 { return fmt.Errorf("decodeData error") } b := big.NewInt(0) b.SetBytes(decodeData) acc := &Account{ Balance: math.HexOrDecimal256(*b), HasCode: false, } return utils.WriteTo(w, req, acc) } else { acc, err := a.getAccount(addr, h) if err != nil { return err } return utils.WriteTo(w, req, acc) } } func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error { addr, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } key, err := polo.ParseBytes32(mux.Vars(req)["key"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "key")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } storage, err := a.getStorage(addr, key, h.StateRoot()) if err != nil { return err } return utils.WriteTo(w, req, map[string]string{"value": storage.String()}) } func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error { callData := &CallData{} if err := utils.ParseJSON(req.Body, &callData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } var addr *polo.Address if mux.Vars(req)["address"] != "" { address, err := polo.ParseAddress(mux.Vars(req)["address"]) if err != nil { return utils.BadRequest(errors.WithMessage(err, "address")) } addr = &address } var batchCallData = &BatchCallData{ Clauses: Clauses{ Clause{ To: addr, Value: callData.Value, Data: callData.Data, }, }, Gas: callData.Gas, GasPrice: callData.GasPrice, Caller: callData.Caller, } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results[0]) } func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error { batchCallData := &BatchCallData{} if err := utils.ParseJSON(req.Body, &batchCallData); err != nil { return utils.BadRequest(errors.WithMessage(err, "body")) } h, err := a.handleRevision(req.URL.Query().Get("revision")) if err != nil { return err } results, err := a.batchCall(req.Context(), batchCallData, h) if err != nil { return err } return utils.WriteJSON(w, results) } func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) { gas, gasPrice, caller, clauses,
err := a.handleBatchCallData(batchCallData) if err != nil { return nil, err } state, err := a.stateCreator.NewState(header.StateRoot()) if err != nil { return nil, err } signer, _ := header.Signer() rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state, &xenv.BlockContext{ Beneficiary: header.Beneficiary(), Signer: signer, Number: header.Number(), Time: header.Timestamp(), GasLimit: header.GasLimit(), TotalScore: header.TotalScore()}) results = make(BatchCallResults, 0) vmout := make(chan *runtime.Output, 1) for i, clause := range clauses { exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{ Origin: *caller, GasPrice: gasPrice}) go func() { out, _ := exec() vmout <- out }() select { case <-ctx.Done(): interrupt() return nil, ctx.Err() case out := <-vmout: if err := rt.Seeker().Err(); err != nil { return nil, err } if err := state.Err(); err != nil { return nil, err } results = append(results, convertCallResultWithInputGas(out, gas)) if out.VMErr != nil { return results, nil } gas = out.LeftOverGas } } return results, nil } func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) { if batchCallData.Gas > a.callGasLimit { return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit")) } else if batchCallData.Gas == 0 { gas = a.callGasLimit } else { gas = batchCallData.Gas } if batchCallData.GasPrice == nil { gasPrice = new(big.Int) } else { gasPrice = (*big.Int)(batchCallData.GasPrice) } if batchCallData.Caller == nil { caller = &polo.Address{} } else { caller = batchCallData.Caller } clauses = make([]*tx.Clause, len(batchCallData.Clauses)) for i, c := range batchCallData.Clauses { var value *big.Int if c.Value == nil { value = new(big.Int) } else { value = (*big.Int)(c.Value) } var data []byte if c.Data != "" { data, err = hexutil.Decode(c.Data) if err != nil { err = utils.BadRequest(errors.WithMessage(err, fmt.Sprintf("data[%d]", i))) return } } clauses[i] = tx.NewClause(c.To).WithData(data).WithValue(value) } return } func (a *Accounts) handleRevision(revision string) (*block.Header, error) { if revision == "" || revision == "best" { return a.chain.BestBlock().Header(), nil } if len(revision) == 66 || len(revision) == 64 { blockID, err := polo.ParseBytes32(revision) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } h, err := a.chain.GetBlockHeader(blockID) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } n, err := strconv.ParseUint(revision, 0, 0) if err != nil { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } if n > math.MaxUint32 { return nil, utils.BadRequest(errors.WithMessage(errors.New("block number out of max uint32"), "revision")) } h, err := a.chain.GetTrunkBlockHeader(uint32(n)) if err != nil { if a.chain.IsNotFound(err) { return nil, utils.BadRequest(errors.WithMessage(err, "revision")) } return nil, err } return h, nil } func (a *Accounts) Mount(root *mux.Router, pathPrefix string) { sub := root.PathPrefix(pathPrefix).Subrouter() sub.Path("/*").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallBatchCode)) sub.Path("/{address}").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetAccount)) sub.Path("/{address}/code").Methods(http.MethodGet).HandlerFunc(utils.WrapHandlerFunc(a.handleGetCode)) 
sub.Path("/{address}/storage/{key}").Methods("GET").HandlerFunc(utils.WrapHandlerFunc(a.handleGetStorage)) sub.Path("").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) sub.Path("/{address}").Methods("POST").HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract)) }
andleGetAccount(
identifier_name
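Aside: the handleGetAccount suffix above hand-assembles the eth-call data for an ERC20 balanceOf(address) query: the 4-byte selector held in balanceSig, then the requested address left-padded to a 32-byte ABI word. A minimal Python sketch of that byte layout, assuming balanceSig is the standard 0x70a08231 selector (the constant itself is defined outside this excerpt):

    # Sketch only: mirrors the padding loop in handleGetAccount above.
    selector = bytes.fromhex("70a08231")                      # assumed value of balanceSig
    addr = bytes.fromhex("11" * 20)                           # a 20-byte account address
    call_data = selector + b"\x00" * (32 - len(addr)) + addr  # left-pad address to 32 bytes
    assert len(call_data) == 36                               # 4-byte selector + one ABI word
    print("0x" + call_data.hex())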
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>>
#[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) } #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. 
{:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn request_tile_on_connected( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. 
fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD); if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else { mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned horizontal scrolling amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Depending on `dirty`, either update the local `cache` or fill the event /// queue for [`CameraBPAction`] with the locally cached copy. fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right { acts.send(CameraBPAction::MoveRight(Some(w))) } if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
{ let opt = Opt::from_args(); run(opt) }
identifier_body
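Aside: the edge-panning rule in act_camera_on_window_edge is easier to follow in isolation. A standalone Python sketch of the same math, reusing the two threshold constants from the Rust source (the sample coordinates are made up):

    import math

    CURSOR_H_THRESHOLD = 0.55
    CURSOR_V_THRESHOLD = 0.42

    def edge_pan(mouse_x, mouse_y, window_w, window_h):
        # Map pixel coordinates into [-1, 1]^2 with (0, 0) at the window centre.
        x = mouse_x / (window_w / 2.0) - 1.0
        y = mouse_y / (window_h / 2.0) - 1.0
        if -CURSOR_H_THRESHOLD <= x <= CURSOR_H_THRESHOLD and \
           -CURSOR_V_THRESHOLD <= y <= CURSOR_V_THRESHOLD:
            return None                          # inside the dead zone: no panning
        angle = math.atan2(x, y)                 # atan2(x, y), matching the Rust code
        return math.sin(angle), math.cos(angle)  # (right, forward) pan weights

    print(edge_pan(790, 300, 800, 600))          # cursor near the right edge -> pans right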
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>> { let opt = Opt::from_args(); run(opt) } #[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) 
} #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. {:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn
( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD); if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else { mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned horizontal scrolling amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Depending on `dirty`, either update the local `cache` or fill the event /// queue for [`CameraBPAction`] with the locally cached copy. 
fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right { acts.send(CameraBPAction::MoveRight(Some(w))) } if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
request_tile_on_connected
identifier_name
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>> { let opt = Opt::from_args(); run(opt) } #[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) 
} #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. {:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn request_tile_on_connected( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? 
fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD);
mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned horizontal scrolling amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Depending on `dirty`, either update the local `cache` or fill the event /// queue for [`CameraBPAction`] with the locally cached copy. fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right { acts.send(CameraBPAction::MoveRight(Some(w))) } if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else {
random_line_split
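Aside: act_on_scroll_wheel normalises wheel deltas before emitting zoom actions. A small Python sketch of that rule, with LINE_SIZE taken from the Rust constant (the tuple return is illustrative, not part of the game's API):

    LINE_SIZE = 14.0  # pixels per scroll "line", as in the Rust constant above

    def zoom_amount(delta_y, unit_is_lines):
        # Convert line units to pixel-like amounts, then pick a zoom direction.
        w = abs(delta_y) * (LINE_SIZE if unit_is_lines else 1.0)
        if delta_y > 0:
            return ('ZoomIn', w)
        if delta_y < 0:
            return ('ZoomOut', w)
        return None

    print(zoom_amount(-2.0, True))  # ('ZoomOut', 28.0)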
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>> { let opt = Opt::from_args(); run(opt) } #[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) 
} #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. {:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn request_tile_on_connected( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? 
fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD); if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else { mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned horizontal scrolling amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Depending on `dirty`, either update the local `cache` or fill the event /// queue for [`CameraBPAction`] with the locally cached copy. fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right
if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
{ acts.send(CameraBPAction::MoveRight(Some(w))) }
conditional_block
rzline.py
# -*- coding: utf-8 -*-
import os
import json
import time
import copy

import scrapy
import pymysql
from scrapy.exceptions import CloseSpider

from Bill.util import misc
from scrapy.conf import settings
from Bill.items import RzlineItem
from Bill.util.send_email import EmailSender

Today = time.strftime("%Y%m%d")
Today1 = time.strftime("%Y-%m-%d")


class RzlineSpider(scrapy.Spider):
    name = 'rzline'
    allowed_domains = ['rzline.com']
    start_urls = ['http://www.rzline.com/web/front/quoteMarket/show']
    except_company = ['沃泉金融', '杭州沃泉金融', '杭州沃泉金融服务外包有限公司']
    custom_settings = {
        "LOG_LEVEL": "INFO",
        'LOG_FILE': os.path.join(settings['LOG_DIR'], name, Today + '.txt'),
        'DOWNLOADER_MIDDLEWARES': {
            'Bill.middlewares.RandomUserAgentMiddleware': 544,
            'Bill.middlewares.RzlineMiddleware': 546,
        }
    }
    city_list = [
        '北京市', '哈尔滨市', '长春市', '沈阳市', '天津市', '呼和浩特市',
        '乌鲁木齐市', '银川市', '西宁市', '兰州市', '西安市', '拉萨市',
        '成都市', '重庆市', '贵阳市', '昆明市', '太原市', '石家庄市',
        '济南市', '郑州市', '合肥市', '南京市', '上海市', '武汉市',
        '长沙市', '南昌市', '杭州市', '福州市', '台北市', '南宁市',
        '海口市', '广州市', '深圳市',
    ]
    # Query date
    quoteDate = time.mktime(time.strptime(Today, '%Y%m%d'))
    quoteDate = str(quoteDate).replace('.', '') + '00'
    quoteType_dict = {
        'e': '电票',
        'se': '小电票',
        # 's': '纸票',
        # 'b': '商票',
    }
    # Kind mapping
    kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农',
                 'busEle': '电子', 'busPaper': '纸质', 'gq': '国企',
                 'yq': '央企', 'ss': '上市公司', 'my': '民营企业'}
    # Business type mapping
    busType_dict = {"1": "买断", "2": "直贴", "0": ""}
    # Number of rows per query
    rows = 100
    formdata = {
        "page": 1,
        "city": "",
        "rows": rows,
        "orderBy": "2",
        "quoteType": "",
        "detailType": "",
        "quoteDate": quoteDate,
        "appVersion": "iOS2.6.1",
        "ifDefaultCity": "false",
    }
    formdata1 = {
        "quoteDate": "",
        "quoteType": "",
        "orgUserId": "",
        "appVersion": "2.6.1",
    }
    headers = {
        "Content-Length": "82",
        "Host": "www.rzline.com",
        "Connection": "keep-alive",
        "Origin": "http://www.rzline.com",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "X-Requested-With": "XMLHttpRequest",
        "Content-Type": "application/json;charset=UTF-8",  # required
        "Accept": "application/json, text/javascript, */*; q=0.01",  # required
        "Referer": "http://www.rzline.com/web/front/quoteMarket/show",
    }
    # Connect to the database
    db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345')
    cur = db.cursor()

    def parse(self, response):
        base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
        for city in self.city_list:
            for type in self.quoteType_dict:
                formdata = copy.deepcopy(self.formdata)
                formdata['city'] = city
                formdata['quoteType'] = type
                if city == '上海市':
                    formdata['ifDefaultCity'] = 'true'
                url = base_url + '&page=' + str(formdata['page'])
                yield scrapy.Request(
                    url=url,
                    dont_filter=True,
                    callback=self.parse_id,
                    errback=self.hand_error,
                    meta={'data': formdata,
                          'header': self.headers,
                          'city': city},
                )

    def parse_id(self, response):
        flag = 1
        city = response.meta['city']
        data = response.body.decode()
        json_data = json.loads(data)
        id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail'
        print('Current page: {}'.format(response.meta['data']['page']))
        print('data=', response.meta['data'])
        try:
            data_list = json_data['data']
            if data_list:
                for i in data_list:
                    user_id = i['orgUserId']
                    company = i['orgSimpleName']
                    price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else []
                    if len(price_list):
                        formdata1 = copy.deepcopy(self.formdata1)
                        formdata1['orgUserId'] = str(user_id)
                        formdata1['quoteDate'] = i['quoteDate']
                        formdata1['quoteType'] = response.meta['data']['quoteType']
                        url = id_url + '&orgUserId=' + str(user_id) + \
                              '&quoteType=' + formdata1['quoteType']
                        print(user_id, ':', company, ':', len(price_list), ':', city, ':', formdata1['quoteType'])
                        self.logger.info(str(user_id) + ':' + company + ':' + str(len(price_list))
                                         + ':' + city + ':' + formdata1['quoteType'])
                        yield scrapy.Request(url=url,
                                             priority=1,
                                             callback=self.parse_detail,
                                             errback=self.hand_error,
                                             meta={'data': formdata1,
                                                   'city': city,
                                                   'header': self.headers,
                                                   'user_id': user_id,
                                                   'quoteType': formdata1['quoteType'],
                                                   }
                                             )
            else:
                flag = 0
        except Exception as e:
            # Send an alert email
            title = 'Spider ' + self.name + ' exception'
            error_info = misc.get_error_info(str(e))
            content = 'Location: ' + error_info['pos'] + '\n' + 'Reason: ' + error_info['reason']
            EmailSender().send(title, content)
            raise CloseSpider

        # # Next page: not needed while rows is set to 100
        # if flag:
        #     # self.logger.debug('Current page: {}'.format(response.meta['data']['page']))
        #     print('Current page: {}'.format(response.meta['data']['page']))
        #     print('data=', response.meta['data'])
#     formdata['page'] += 1
        #     base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
        #     url = base_url + '&page=' + str(formdata['page'])
        #     yield scrapy.Request(
        #         url=url,
        #         callback=self.parse_id,
        #         errback=self.hand_error,
        #         meta={'data': formdata,
        #               'city': city,
        #               'header': self.headers},
        #     )

    def parse_detail(self, response):
        res = response.body.decode('utf-8')
        data = json.loads(res)
        data = data['data']
        price_list = data.get('quotePriceDetailList', [])
        if not price_list:
            print('formdata1:', response.meta['data'])
        else:
            print('city', response.meta['city'])
        for price in price_list:
            item = RzlineItem()
            try:
                # Publish time
                F2 = price['createTime'] if price['createTime'] else None
                F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else ''
                item['F2'] = F2
                # Institution
                # simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else ''
                whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else ''
                item['F3'] = whole_name if whole_name else ''
                # Price
                item['F4'] = price['price'] if price['price'] else ''
                # Kind
                detail_type = price['detailType'] if price['detailType'] else ''
                quote_btype = price['quoteBType'] if price['quoteBType'] else ''
                if detail_type in ['gg', 'sh', 'sn']:
                    kind = self.kind_dict[detail_type]
                elif detail_type in ['busEle', 'busPaper']:
                    kind = self.kind_dict[quote_btype]
                else:
                    kind = ''
                item['F5'] = kind
                # Interest per 100,000
                F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else ''
                item['F6'] = F6 + '元' if F6 else ''
                # Term
                F7 = price['quoteDays'] if price['quoteDays'] else ''
                item['F7'] = F7
                # Quota
                item['F8'] = price['quoteAmount']
                # Business type (buyout / direct discount)
                item['F9'] = '直贴'
                # Contact name
                F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else ''
                item['F10'] = F10
                # Contact phone
                F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else ''
                item['F11'] = F11
                item['F12'] = self.quoteType_dict[response.meta['quoteType']]
                # Original business type
                item['F13'] = self.busType_dict[data['busType']] if data['busType'] else ''
                # Unique ID: date (Y/M/D) + institution + kind + amount + term + business type + bill type
                item['F1'] = self._get_uuid(item)
                # FT, FV, FP, FU, FS
                FS = data['ifStartTrad'] if data['ifStartTrad'] else None
                item['FS'] = 0 if FS != "1" else 1
                item['FP'] = int(time.strftime("%Y%m%d%H%M%S"))
                item['FU'] = int(time.strftime("%Y%m%d%H%M%S"))
                if item['F3'] in self.except_company:
                    return
                if Today in item['F2']:
                    print("Today's data:")
                    print(item, response.meta['user_id'])
                    self.logger.info(item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                    yield item
                else:
                    print('Data not from today:')
                    F2 = '-'.join((item['F2'][:4], item['F2'][4:6], item['F2'][6:8]))
                    self.cur.execute("select f002_ths032 from pljr.ths032 "
                                     "where f003_ths032 = '212004' "
                                     "and f001_ths032 like '{}%'".format(F2))
                    day = self.cur.fetchone()[0]
                    start = time.mktime(time.strptime(item['F2'][:8], '%Y%m%d'))
                    next_day = time.strftime('%Y-%m-%d', time.localtime(start + (24*60*60)))
                    self.cur.execute("select f002_ths032 from pljr.ths032 "
                                     "where f003_ths032 = '212004' "
                                     "and f001_ths032 like '{}%'".format(next_day))
                    next = self.cur.fetchone()[0]
                    end = time.mktime(time.strptime(Today, '%Y%m%d'))
                    days = int(end - start) // (24 * 60 * 60)
                    if day == '1':
                        if days == 1 or (days == 3 and next == '2'):
                            t = item['F2'][8:]
                            if t > '165000':
                                print(day, days, next)
                                item['F2'] = Today + '080000'
                                item['F1'] = self._get_uuid(item)
                                print(item, response.meta['user_id'])
                                yield item
                                self.logger.info(
                                    item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                    else:
                        if days <= 3:
                            item['F2'] = Today + '080000'
                            item['F1'] = self._get_uuid(item)
                            print(item, response.meta['user_id'])
                            yield item
                            self.logger.info(
                                item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
            except Exception as e:
                title = 'Spider ' + self.name + ' exception'
                error_info = misc.get_error_info(str(e))
                content = 'Location: ' + error_info['pos'] + '\n' + 'Reason: ' + error_info['reason']
                EmailSender().send(title, content)
                raise CloseSpider

    @staticmethod
    def _get_uuid(item):
        # Unique ID: date (Y/M/D) + institution + kind + amount + term + business type + bill type
        uu_str = item['F2'][:8] + item['F3'] + item['F5'] + item['F7'] \
                 + item['F8'] + item['F13'] + item['F12']
        uu_id = misc.get_uuid(uu_str)
        return uu_id

    def hand_error(self, failure):
        print(failure)
# formdata = response.meta['data']
random_line_split
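Aside: deduplication in this spider hinges on the composite key built by _get_uuid. misc.get_uuid is not shown in the excerpt, so the sketch below substitutes a deterministic name-based UUID purely for illustration; the field values are made up:

    import uuid

    def get_uuid_sketch(item):
        # Same concatenation as _get_uuid above; uuid5 stands in for misc.get_uuid.
        key = (item['F2'][:8] + item['F3'] + item['F5'] + item['F7']
               + item['F8'] + item['F13'] + item['F12'])
        return str(uuid.uuid5(uuid.NAMESPACE_URL, key))

    row = {'F2': '20190701080000', 'F3': 'Some Bank', 'F5': '国股',
           'F7': '30', 'F8': '100', 'F13': '直贴', 'F12': '电票'}
    print(get_uuid_sketch(row))  # identical inputs always yield the same ID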
rzline.py
# -*- coding: utf-8 -*-
import os
import json
import time
import copy

import scrapy
import pymysql
from scrapy.exceptions import CloseSpider

from Bill.util import misc
from scrapy.conf import settings
from Bill.items import RzlineItem
from Bill.util.send_email import EmailSender

Today = time.strftime("%Y%m%d")
Today1 = time.strftime("%Y-%m-%d")


class RzlineSpider(scrapy.Spider):
    name = 'rzline'
    allowed_domains = ['rzline.com']
    start_urls = ['http://www.rzline.com/web/front/quoteMarket/show']
    except_company = ['沃泉金融', '杭州沃泉金融', '杭州沃泉金融服务外包有限公司']
    custom_settings = {
        "LOG_LEVEL": "INFO",
        'LOG_FILE': os.path.join(settings['LOG_DIR'], name, Today + '.txt'),
        'DOWNLOADER_MIDDLEWARES': {
            'Bill.middlewares.RandomUserAgentMiddleware': 544,
            'Bill.middlewares.RzlineMiddleware': 546,
        }
    }
    city_list = [
        '北京市', '哈尔滨市', '长春市', '沈阳市', '天津市', '呼和浩特市',
        '乌鲁木齐市', '银川市', '西宁市', '兰州市', '西安市', '拉萨市',
        '成都市', '重庆市', '贵阳市', '昆明市', '太原市', '石家庄市',
        '济南市', '郑州市', '合肥市', '南京市', '上海市', '武汉市',
        '长沙市', '南昌市', '杭州市', '福州市', '台北市', '南宁市',
        '海口市', '广州市', '深圳市',
    ]
    # Query date
    quoteDate = time.mktime(time.strptime(Today, '%Y%m%d'))
    quoteDate = str(quoteDate).replace('.', '') + '00'
    quoteType_dict = {
        'e': '电票',
        'se': '小电票',
        # 's': '纸票',
        # 'b': '商票',
    }
    # Kind mapping
    kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农',
                 'busEle': '电子', 'busPaper': '纸质', 'gq': '国企',
                 'yq': '央企', 'ss': '上市公司', 'my': '民营企业'}
    # Business type mapping
    busType_dict = {"1": "买断", "2": "直贴", "0": ""}
    # Number of rows per query
    rows = 100
    formdata = {
        "page": 1,
        "city": "",
        "rows": rows,
        "orderBy": "2",
        "quoteType": "",
        "detailType": "",
        "quoteDate": quoteDate,
        "appVersion": "iOS2.6.1",
        "ifDefaultCity": "false",
    }
    formdata1 = {
        "quoteDate": "",
        "quoteType": "",
        "orgUserId": "",
        "appVersion": "2.6.1",
    }
    headers = {
        "Content-Length": "82",
        "Host": "www.rzline.com",
        "Connection": "keep-alive",
        "Origin": "http://www.rzline.com",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "X-Requested-With": "XMLHttpRequest",
        "Content-Type": "application/json;charset=UTF-8",  # required
        "Accept": "application/json, text/javascript, */*; q=0.01",  # required
        "Referer": "http://www.rzline.com/web/front/quoteMarket/show",
    }
    # Connect to the database
    db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345')
    cur = db.cursor()

    def parse(self, response):
        base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
        for city in self.city_list:
            for type in self.quoteType_dict:
                formdata = copy.deepcopy(self.formdata)
                formdata['city'] = city
                formdata['quoteType'] = type
                if city == '上海市':
                    formdata['ifDefaultCity'] = 'true'
                url = base_url + '&page=' + str(formdata['page'])
                yield scrapy.Request(
                    url=url,
                    dont_filter=True,
                    callback=self.parse_id,
                    errback=self.hand_error,
                    meta={'data': formdata,
                          'header': self.headers,
                          'city': city},
                )

    def parse_id(self, response):
        flag = 1
        city = response.meta['city']
        data = response.body.decode()
        json_data = json.loads(data)
        id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail'
        print('Current page: {}'.format(response.meta['data']['page']))
        print('data=', response.meta['data'])
        try:
            data_list = json_data['data']
f data_list:
                for i in data_list:
                    user_id = i['orgUserId']
                    company = i['orgSimpleName']
                    price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else []
                    if len(price_list):
                        formdata1 = copy.deepcopy(self.formdata1)
                        formdata1['orgUserId'] = str(user_id)
                        formdata1['quoteDate'] = i['quoteDate']
                        formdata1['quoteType'] = response.meta['data']['quoteType']
                        url = id_url + '&orgUserId=' + str(user_id) + \
                              '&quoteType=' + formdata1['quoteType']
                        print(user_id, ':', company, ':', len(price_list), ':', city, ':', formdata1['quoteType'])
                        self.logger.info(str(user_id) + ':' + company + ':' + str(len(price_list))
                                         + ':' + city + ':' + formdata1['quoteType'])
                        yield scrapy.Request(url=url,
                                             priority=1,
                                             callback=self.parse_detail,
                                             errback=self.hand_error,
                                             meta={'data': formdata1,
                                                   'city': city,
                                                   'header': self.headers,
                                                   'user_id': user_id,
                                                   'quoteType': formdata1['quoteType'],
                                                   }
                                             )
            else:
                flag = 0
        except Exception as e:
            # Send an alert email
            title = 'Spider ' + self.name + ' exception'
            error_info = misc.get_error_info(str(e))
            content = 'Location: ' + error_info['pos'] + '\n' + 'Reason: ' + error_info['reason']
            EmailSender().send(title, content)
            raise CloseSpider

        # # Next page: not needed while rows is set to 100
        # if flag:
        #     # self.logger.debug('Current page: {}'.format(response.meta['data']['page']))
        #     print('Current page: {}'.format(response.meta['data']['page']))
        #     print('data=', response.meta['data'])
        #     formdata = response.meta['data']
        #     formdata['page'] += 1
        #     base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
        #     url = base_url + '&page=' + str(formdata['page'])
        #     yield scrapy.Request(
        #         url=url,
        #         callback=self.parse_id,
        #         errback=self.hand_error,
        #         meta={'data': formdata,
        #               'city': city,
        #               'header': self.headers},
        #     )

    def parse_detail(self, response):
        res = response.body.decode('utf-8')
        data = json.loads(res)
        data = data['data']
        price_list = data.get('quotePriceDetailList', [])
        if not price_list:
            print('formdata1:', response.meta['data'])
        else:
            print('city', response.meta['city'])
        for price in price_list:
            item = RzlineItem()
            try:
                # Publish time
                F2 = price['createTime'] if price['createTime'] else None
                F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else ''
                item['F2'] = F2
                # Institution
                # simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else ''
                whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else ''
                item['F3'] = whole_name if whole_name else ''
                # Price
                item['F4'] = price['price'] if price['price'] else ''
                # Kind
                detail_type = price['detailType'] if price['detailType'] else ''
                quote_btype = price['quoteBType'] if price['quoteBType'] else ''
                if detail_type in ['gg', 'sh', 'sn']:
                    kind = self.kind_dict[detail_type]
                elif detail_type in ['busEle', 'busPaper']:
                    kind = self.kind_dict[quote_btype]
                else:
                    kind = ''
                item['F5'] = kind
                # Interest per 100,000
                F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else ''
                item['F6'] = F6 + '元' if F6 else ''
                # Term
                F7 = price['quoteDays'] if price['quoteDays'] else ''
                item['F7'] = F7
                # Quota
                item['F8'] = price['quoteAmount']
                # Business type (buyout / direct discount)
                item['F9'] = '直贴'
                # Contact name
                F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else ''
                item['F10'] = F10
                # Contact phone
                F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else ''
                item['F11'] = F11
                item['F12'] = self.quoteType_dict[response.meta['quoteType']]
                # Original business type
                item['F13'] = self.busType_dict[data['busType']] if data['busType'] else ''
                # Unique ID: date (Y/M/D) + institution + kind + amount + term + business type + bill type
                item['F1'] = self._get_uuid(item)
                # FT, FV, FP, FU, FS
                FS = data['ifStartTrad'] if data['ifStartTrad'] else None
                item['FS'] = 0 if FS != "1" else 1
                item['FP'] = int(time.strftime("%Y%m%d%H%M%S"))
                item['FU'] = int(time.strftime("%Y%m%d%H%M%S"))
                if item['F3'] in self.except_company:
                    return
                if Today in item['F2']:
                    print("Today's data:")
                    print(item, response.meta['user_id'])
                    self.logger.info(item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                    yield item
                else:
                    print('Data not from today:')
                    F2 = '-'.join((item['F2'][:4], item['F2'][4:6], item['F2'][6:8]))
                    self.cur.execute("select f002_ths032 from pljr.ths032 "
                                     "where f003_ths032 = '212004' "
                                     "and f001_ths032 like '{}%'".format(F2))
                    day = self.cur.fetchone()[0]
                    start = time.mktime(time.strptime(item['F2'][:8], '%Y%m%d'))
                    next_day = time.strftime('%Y-%m-%d', time.localtime(start + (24*60*60)))
                    self.cur.execute("select f002_ths032 from pljr.ths032 "
                                     "where f003_ths032 = '212004' "
                                     "and f001_ths032 like '{}%'".format(next_day))
                    next = self.cur.fetchone()[0]
                    end = time.mktime(time.strptime(Today, '%Y%m%d'))
                    days = int(end - start) // (24 * 60 * 60)
                    if day == '1':
                        if days == 1 or (days == 3 and next == '2'):
                            t = item['F2'][8:]
                            if t > '165000':
                                print(day, days, next)
                                item['F2'] = Today + '080000'
                                item['F1'] = self._get_uuid(item)
                                print(item, response.meta['user_id'])
                                yield item
                                self.logger.info(
                                    item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                    else:
                        if days <= 3:
                            item['F2'] = Today + '080000'
                            item['F1'] = self._get_uuid(item)
                            print(item, response.meta['user_id'])
                            yield item
                            self.logger.info(
                                item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
            except Exception as e:
                title = 'Spider ' + self.name + ' exception'
                error_info = misc.get_error_info(str(e))
                content = 'Location: ' + error_info['pos'] + '\n' + 'Reason: ' + error_info['reason']
                EmailSender().send(title, content)
                raise CloseSpider

    @staticmethod
    def _get_uuid(item):
        # Unique ID: date (Y/M/D) + institution + kind + amount + term + business type + bill type
        uu_str = item['F2'][:8] + item['F3'] + item['F5'] + item['F7'] \
                 + item['F8'] + item['F13'] + item['F12']
        uu_id = misc.get_uuid(uu_str)
        return uu_id

    def hand_error(self, failure):
        print(failure)
i
identifier_name
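Aside: the back-dating branch of parse_detail is the trickiest part of this spider. A plain-Python restatement of the rule, under the assumption that the ths032 flag '1' marks a trading day and '2' a non-trading day (the real code reads these flags from the pljr.ths032 table; here they are passed in as arguments):

    import time

    def backfill_date(f2, today, is_trading_day, next_day_is_holiday):
        # A quote posted after 16:50 on the previous trading day (or up to three
        # days back across a non-trading stretch) is carried forward to 08:00 today.
        start = time.mktime(time.strptime(f2[:8], '%Y%m%d'))
        end = time.mktime(time.strptime(today, '%Y%m%d'))
        days = int(end - start) // (24 * 60 * 60)
        if is_trading_day:
            if (days == 1 or (days == 3 and next_day_is_holiday)) and f2[8:] > '165000':
                return today + '080000'   # re-date the quote to 08:00 today
        elif days <= 3:
            return today + '080000'       # non-trading day: carry forward up to 3 days
        return None                       # keep the original timestamp

    print(backfill_date('20190705170100', '20190708', True, True))  # Friday -> Monday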
rzline.py
# -*- coding: utf-8 -*-
import os
import json
import time
import copy

import scrapy
import pymysql
from scrapy.exceptions import CloseSpider

from Bill.util import misc
from scrapy.conf import settings
from Bill.items import RzlineItem
from Bill.util.send_email import EmailSender

Today = time.strftime("%Y%m%d")
Today1 = time.strftime("%Y-%m-%d")


class RzlineSpider(scrapy.Spider):
    name = 'rzline'
    allowed_domains = ['rzline.com']
    start_urls = ['http://www.rzline.com/web/front/quoteMarket/show']
    except_company = ['沃泉金融', '杭州沃泉金融', '杭州沃泉金融服务外包有限公司']
    custom_settings = {
        "LOG_LEVEL": "INFO",
        'LOG_FILE': os.path.join(settings['LOG_DIR'], name, Today + '.txt'),
        'DOWNLOADER_MIDDLEWARES': {
            'Bill.middlewares.RandomUserAgentMiddleware': 544,
            'Bill.middlewares.RzlineMiddleware': 546,
        }
    }
    city_list = [
        '北京市', '哈尔滨市', '长春市', '沈阳市', '天津市', '呼和浩特市',
        '乌鲁木齐市', '银川市', '西宁市', '兰州市', '西安市', '拉萨市',
        '成都市', '重庆市', '贵阳市', '昆明市', '太原市', '石家庄市',
        '济南市', '郑州市', '合肥市', '南京市', '上海市', '武汉市',
        '长沙市', '南昌市', '杭州市', '福州市', '台北市', '南宁市',
        '海口市', '广州市', '深圳市',
    ]
    # Query date
    quoteDate = time.mktime(time.strptime(Today, '%Y%m%d'))
    quoteDate = str(quoteDate).replace('.', '') + '00'
    quoteType_dict = {
        'e': '电票',
        'se': '小电票',
        # 's': '纸票',
        # 'b': '商票',
    }
    # Kind mapping
    kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农',
                 'busEle': '电子', 'busPaper': '纸质', 'gq': '国企',
                 'yq': '央企', 'ss': '上市公司', 'my': '民营企业'}
    # Business type mapping
    busType_dict = {"1": "买断", "2": "直贴", "0": ""}
    # Number of rows per query
    rows = 100
    formdata = {
        "page": 1,
        "city": "",
        "rows": rows,
        "orderBy": "2",
        "quoteType": "",
        "detailType": "",
        "quoteDate": quoteDate,
        "appVersion": "iOS2.6.1",
        "ifDefaultCity": "false",
    }
    formdata1 = {
        "quoteDate": "",
        "quoteType": "",
        "orgUserId": "",
        "appVersion": "2.6.1",
    }
    headers = {
        "Content-Length": "82",
        "Host": "www.rzline.com",
        "Connection": "keep-alive",
        "Origin": "http://www.rzline.com",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "X-Requested-With": "XMLHttpRequest",
        "Content-Type": "application/json;charset=UTF-8",  # required
        "Accept": "application/json, text/javascript, */*; q=0.01",  # required
        "Referer": "http://www.rzline.com/web/front/quoteMarket/show",
    }
    # Connect to the database
    db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345')
    cur = db.cursor()

    def parse(self, response):
        base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
        for city in self.city_list:
            for type in self.quoteType_dict:
                formdata = copy.deepcopy(self.formdata)
                formdata['city'] = city
                formdata['quoteType'] = type
                if city == '上海市':
                    formdata['ifDefaultCity'] = 'true'
                url = base_url + '&page=' + str(formdata['page'])
                yield scrapy.Request(
                    url=url,
                    dont_filter=True,
                    callback=self.parse_id,
                    errback=self.hand_error,
                    meta={'data': formdata,
                          'header': self.headers,
                          'city':
flag = 1 city = response.meta['city'] data = response.body.decode() json_data = json.loads(data) id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail' print('当前页数:{}'.format(response.meta['data']['page'])) print('data=', response.meta['data']) try: data_list = json_data['data'] if data_list: for i in data_list: user_id = i['orgUserId'] company = i['orgSimpleName'] price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else [] if len(price_list): formdata1 = copy.deepcopy(self.formdata1) formdata1['orgUserId'] = str(user_id) formdata1['quoteDate'] = i['quoteDate'] formdata1['quoteType'] = response.meta['data']['quoteType'] url = id_url + '&orgUserId=' + str(user_id) + \ '&quoteType=' + formdata1['quoteType'] print(user_id, ':', company, ':', len(price_list), ':', city, ':', formdata1['quoteType']) self.logger.info(str(user_id)+':'+company+':'+str(len(price_list)) + ':' + city + ':' + formdata1['quoteType']) yield scrapy.Request(url=url, priority=1, callback=self.parse_detail, errback=self.hand_error, meta={'data': formdata1, 'city': city, 'header': self.headers, 'user_id': user_id, 'quoteType': formdata1['quoteType'], } ) else: flag = 0 except Exception as e: # 发送邮件 title = '爬虫' + self.name + '异常' error_info = misc.get_error_info(str(e)) content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason'] EmailSender().send(title, content) raise CloseSpider # # 下一页: rows设置为100时,不需要下一页 # if flag: # # self.logger.debug('当前页数:{}'.format(response.meta['data']['page'])) # print('当前页数:{}'.format(response.meta['data']['page'])) # print('data=', response.meta['data']) # formdata = response.meta['data'] # formdata['page'] += 1 # base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow' # url = base_url + '&page=' + str(formdata['page']) # yield scrapy.Request( # url=url, # callback=self.parse_id, # errback=self.hand_error, # meta={'data': formdata, # 'city': city, # 'header': self.headers}, # ) def parse_detail(self, response): res = response.body.decode('utf-8') data = json.loads(res) data = data['data'] price_list = data.get('quotePriceDetailList', []) if not price_list: print('formdata1:', response.meta['data']) else: print('city', response.meta['city']) for price in price_list: item = RzlineItem() try: # 发布时间 F2 = price['createTime'] if price['createTime'] else None F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else '' item['F2'] = F2 # 机构 # simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else '' whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else '' item['F3'] = whole_name if whole_name else '' # 金额 item['F4'] = price['price'] if price['price'] else '' # 类型 detail_type = price['detailType'] if price['detailType'] else '' quote_btype = price['quoteBType'] if price['quoteBType'] else '' if detail_type in ['gg', 'sh', 'sn']: kind = self.kind_dict[detail_type] elif detail_type in ['busEle', 'busPaper']: kind = self.kind_dict[quote_btype] else: kind = '' item['F5'] = kind # 每十万 F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else '' item['F6'] = F6 + '元' if F6 else '' # 期限 F7 = price['quoteDays'] if price['quoteDays'] else '' item['F7'] = F7 # 额度 item['F8'] = price['quoteAmount'] # 业务类型(买断、直贴) item['F9'] = '直贴' # 联系人 F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else '' item['F10'] = F10 # 联系方式 F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else '' item['F11'] = F11 item['F12'] = 
self.quoteType_dict[response.meta['quoteType']]
                    # Original business type
                    item['F13'] = self.busType_dict[data['busType']] if data['busType'] else ''
                    # Unique key: date (Y/m/d) + institution + kind + amount
                    # + term + business type + note type
                    item['F1'] = self._get_uuid(item)
                    # FT, FV, FP, FU, FS
                    FS = data['ifStartTrad'] if data['ifStartTrad'] else None
                    item['FS'] = 0 if FS != "1" else 1
                    item['FP'] = int(time.strftime("%Y%m%d%H%M%S"))
                    item['FU'] = int(time.strftime("%Y%m%d%H%M%S"))
                    if item['F3'] in self.except_company:
                        return
                    if Today in item['F2']:
                        print('今日数据:')
                        print(item, response.meta['user_id'])
                        self.logger.info(item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                        yield item
                    else:
                        print('非今日的数据:')
                        # Trading-calendar flag for the quote's own date.
                        F2 = '-'.join((item['F2'][:4], item['F2'][4:6], item['F2'][6:8]))
                        self.cur.execute("select f002_ths032 from pljr.ths032 "
                                         "where f003_ths032 = '212004' "
                                         "and f001_ths032 like '{}%'".format(F2))
                        day = self.cur.fetchone()[0]
                        start = time.mktime(time.strptime(item['F2'][:8], '%Y%m%d'))
                        next_day = time.strftime('%Y-%m-%d', time.localtime(start + (24 * 60 * 60)))
                        # Trading-calendar flag for the following day.
                        self.cur.execute("select f002_ths032 from pljr.ths032 "
                                         "where f003_ths032 = '212004' "
                                         "and f001_ths032 like '{}%'".format(next_day))
                        next_flag = self.cur.fetchone()[0]
                        end = time.mktime(time.strptime(Today, '%Y%m%d'))
                        days = int(end - start) // (24 * 60 * 60)
                        if day == '1':
                            # Published after 16:50 on the previous trading day
                            # (or before a weekend): re-date to 08:00 today.
                            if days == 1 or (days == 3 and next_flag == '2'):
                                t = item['F2'][8:]
                                if t > '165000':
                                    print(day, days, next_flag)
                                    item['F2'] = Today + '080000'
                                    item['F1'] = self._get_uuid(item)
                                    print(item, response.meta['user_id'])
                                    yield item
                                    self.logger.info(
                                        item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                        else:
                            # Quote date was not a trading day: accept quotes
                            # up to three days old, re-dated to 08:00 today.
                            if days <= 3:
                                item['F2'] = Today + '080000'
                                item['F1'] = self._get_uuid(item)
                                print(item, response.meta['user_id'])
                                yield item
                                self.logger.info(
                                    item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                except Exception as e:
                    title = '爬虫' + self.name + '异常'
                    error_info = misc.get_error_info(str(e))
                    content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason']
                    EmailSender().send(title, content)
                    raise CloseSpider

    @staticmethod
    def _get_uuid(item):
        # Unique key: date (Y/m/d) + institution + kind + amount + term
        # + business type + note type.
        uu_str = item['F2'][:8] + item['F3'] + item['F5'] + item['F7'] \
                 + item['F8'] + item['F13'] + item['F12']
        uu_id = misc.get_uuid(uu_str)
        return uu_id

    def hand_error(self, failure):
        print(failure)
city}, ) def parse_id(self, response):
conditional_block
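The `conditional_block` sample above cuts inside `parse`, but the interesting branching lives in `parse_detail`: a quote stamped on an earlier day is pulled forward to today 08:00 when it was published after 16:50 on the previous trading day (days == 1, or days == 3 across a weekend whose following day is flagged '2' in pljr.ths032; reading '2' as non-trading is an inference from the code). A small sketch of just the day arithmetic, with the calendar lookup left out and fixed dates so the output is deterministic:

import time

def days_between(f2: str, today: str) -> int:
    # f2 is a 'YYYYMMDDHHMMSS' stamp; compare calendar days only.
    start = time.mktime(time.strptime(f2[:8], '%Y%m%d'))
    end = time.mktime(time.strptime(today, '%Y%m%d'))
    return int(end - start) // (24 * 60 * 60)

f2 = '20200409170500'                  # published 17:05 on the prior day
if days_between(f2, '20200410') == 1 and f2[8:] > '165000':
    f2 = '20200410' + '080000'         # re-dated to 08:00 "today"
print(f2)                              # 20200410080000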
rzline.py
# -*- coding: utf-8 -*- import os import json import time import copy import scrapy import pymysql from scrapy.exceptions import CloseSpider from Bill.util import misc from scrapy.conf import settings from Bill.items import RzlineItem from Bill.util.send_email import EmailSender Today = time.strftime("%Y%m%d") Today1 = time.strftime("%Y-%m-%d") class RzlineSpider(scrapy.Spider): name = 'rzline' allowed_domains = ['rzline.com'] start_urls = ['http://www.rzline.com/web/front/quoteMarket/show'] except_company = ['沃泉金融', '杭州沃泉金融', '杭州沃泉金融服务外包有限公司'] custom_settings = { "LOG_LEVEL": "INFO", 'LOG_FILE': os.path.join(settings['LOG_DIR'], name, Today + '.txt'), 'DOWNLOADER_MIDDLEWARES': { 'Bill.middlewares.RandomUserAgentMiddleware': 544, 'Bill.middlewares.RzlineMiddleware': 546, } } city_list = [ '北京市', '哈尔滨市', '长春市', '沈阳市', '天津市', '呼和浩特市', '乌鲁木齐市', '银川市', '西宁市', '兰州市', '西安市', '拉萨市', '成都市', '重庆市', '贵阳市', '昆明市', '太原市', '石家庄市', '济南市', '郑州市', '合肥市', '南京市', '上海市', '武汉市', '长沙市', '南昌市', '杭州市', '福州市', '台北市' '南宁市', '海口市', '广州市', '深圳市', ] # 查询日期 quoteDate = time.mktime(time.strptime(Today, '%Y%m%d')) quoteDate = str(quoteDate).replace('.', '') + '00' quoteType_dict = { 'e': '电票', 'se': '小电票', # 's': '纸票', # 'b': '商票', } # 类型字典 kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农', 'busEle': '电子', 'busPaper': '纸质', 'gq': '国企', 'yq': '央企', 'ss': '上市公司', 'my': '民营企业'} # 业务类型字典 busType_dict = {"1": "买断", "2": "直贴", "0": ""} # 查询条数 rows = 100 formdata = { "page": 1, "city": "", "rows": rows, "orderBy": "2", "quoteType": "", "detailType": "", "quoteDate": quoteDate, "appVersion": "iOS2.6.1", "ifDefaultCity": "false", } formdata1 = { "quoteDate": "", "quoteType": "", "orgUserId": "", "appVersion": "2.6.1", } headers = { "Content-Length": "82", "Host": "www.rzline.com", "Connection": "keep-alive", "Origin": "http://www.rzline.com", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "X-Requested-With": "XMLHttpRequest", "Content-Type": "application/json;charset=UTF-8", # 必须添加 "Accept": "application/json, text/javascript, */*; q=0.01", # 必须添加 "Referer": "http://www.rzline.com/web/front/quoteMarket/show", } # 连接数据库 db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345') cur = db.cursor() def parse(self, response): base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow' for city in self.city_list: for type in self.quoteType_dict: formdata = copy.deepcopy(self.formdata) formdata['city'] = city formdata['quoteType'] = type if city == '上海市': formdata['ifDefaultCity'] = 'true'
ta'] if data_list: for i in data_list: user_id = i['orgUserId'] company = i['orgSimpleName'] price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else [] if len(price_list): formdata1 = copy.deepcopy(self.formdata1) formdata1['orgUserId'] = str(user_id) formdata1['quoteDate'] = i['quoteDate'] formdata1['quoteType'] = response.meta['data']['quoteType'] url = id_url + '&orgUserId=' + str(user_id) + \ '&quoteType=' + formdata1['quoteType'] print(user_id, ':', company, ':', len(price_list), ':', city, ':', formdata1['quoteType']) self.logger.info(str(user_id)+':'+company+':'+str(len(price_list)) + ':' + city + ':' + formdata1['quoteType']) yield scrapy.Request(url=url, priority=1, callback=self.parse_detail, errback=self.hand_error, meta={'data': formdata1, 'city': city, 'header': self.headers, 'user_id': user_id, 'quoteType': formdata1['quoteType'], } ) else: flag = 0 except Exception as e: # 发送邮件 title = '爬虫' + self.name + '异常' error_info = misc.get_error_info(str(e)) content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason'] EmailSender().send(title, content) raise CloseSpider # # 下一页: rows设置为100时,不需要下一页 # if flag: # # self.logger.debug('当前页数:{}'.format(response.meta['data']['page'])) # print('当前页数:{}'.format(response.meta['data']['page'])) # print('data=', response.meta['data']) # formdata = response.meta['data'] # formdata['page'] += 1 # base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow' # url = base_url + '&page=' + str(formdata['page']) # yield scrapy.Request( # url=url, # callback=self.parse_id, # errback=self.hand_error, # meta={'data': formdata, # 'city': city, # 'header': self.headers}, # ) def parse_detail(self, response): res = response.body.decode('utf-8') data = json.loads(res) data = data['data'] price_list = data.get('quotePriceDetailList', []) if not price_list: print('formdata1:', response.meta['data']) else: print('city', response.meta['city']) for price in price_list: item = RzlineItem() try: # 发布时间 F2 = price['createTime'] if price['createTime'] else None F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else '' item['F2'] = F2 # 机构 # simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else '' whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else '' item['F3'] = whole_name if whole_name else '' # 金额 item['F4'] = price['price'] if price['price'] else '' # 类型 detail_type = price['detailType'] if price['detailType'] else '' quote_btype = price['quoteBType'] if price['quoteBType'] else '' if detail_type in ['gg', 'sh', 'sn']: kind = self.kind_dict[detail_type] elif detail_type in ['busEle', 'busPaper']: kind = self.kind_dict[quote_btype] else: kind = '' item['F5'] = kind # 每十万 F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else '' item['F6'] = F6 + '元' if F6 else '' # 期限 F7 = price['quoteDays'] if price['quoteDays'] else '' item['F7'] = F7 # 额度 item['F8'] = price['quoteAmount'] # 业务类型(买断、直贴) item['F9'] = '直贴' # 联系人 F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else '' item['F10'] = F10 # 联系方式 F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else '' item['F11'] = F11 item['F12'] = self.quoteType_dict[response.meta['quoteType']] # 原始业务类型 item['F13'] = self.busType_dict[data['busType']] if data['busType'] else '' # 唯一标识: 日期(年/月/日)+机构+类型+数量+期限+业务类型+电票 item['F1'] = self._get_uuid(item) # FT, FV, FP, FU, FS FS = data['ifStartTrad'] if data['ifStartTrad'] else None item['FS'] = 0 if FS != "1" else 1 
                    item['FP'] = int(time.strftime("%Y%m%d%H%M%S"))
                    item['FU'] = int(time.strftime("%Y%m%d%H%M%S"))
                    if item['F3'] in self.except_company:
                        return
                    if Today in item['F2']:
                        print('今日数据:')
                        print(item, response.meta['user_id'])
                        self.logger.info(item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                        yield item
                    else:
                        print('非今日的数据:')
                        # Trading-calendar flag for the quote's own date.
                        F2 = '-'.join((item['F2'][:4], item['F2'][4:6], item['F2'][6:8]))
                        self.cur.execute("select f002_ths032 from pljr.ths032 "
                                         "where f003_ths032 = '212004' "
                                         "and f001_ths032 like '{}%'".format(F2))
                        day = self.cur.fetchone()[0]
                        start = time.mktime(time.strptime(item['F2'][:8], '%Y%m%d'))
                        next_day = time.strftime('%Y-%m-%d', time.localtime(start + (24 * 60 * 60)))
                        # Trading-calendar flag for the following day.
                        self.cur.execute("select f002_ths032 from pljr.ths032 "
                                         "where f003_ths032 = '212004' "
                                         "and f001_ths032 like '{}%'".format(next_day))
                        next_flag = self.cur.fetchone()[0]
                        end = time.mktime(time.strptime(Today, '%Y%m%d'))
                        days = int(end - start) // (24 * 60 * 60)
                        if day == '1':
                            # Published after 16:50 on the previous trading day
                            # (or before a weekend): re-date to 08:00 today.
                            if days == 1 or (days == 3 and next_flag == '2'):
                                t = item['F2'][8:]
                                if t > '165000':
                                    print(day, days, next_flag)
                                    item['F2'] = Today + '080000'
                                    item['F1'] = self._get_uuid(item)
                                    print(item, response.meta['user_id'])
                                    yield item
                                    self.logger.info(
                                        item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                        else:
                            # Quote date was not a trading day: accept quotes
                            # up to three days old, re-dated to 08:00 today.
                            if days <= 3:
                                item['F2'] = Today + '080000'
                                item['F1'] = self._get_uuid(item)
                                print(item, response.meta['user_id'])
                                yield item
                                self.logger.info(
                                    item['F3'] + ':' + str(response.meta['user_id']) + ':' + str(item['F2']))
                except Exception as e:
                    title = '爬虫' + self.name + '异常'
                    error_info = misc.get_error_info(str(e))
                    content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason']
                    EmailSender().send(title, content)
                    raise CloseSpider

    @staticmethod
    def _get_uuid(item):
        # Unique key: date (Y/m/d) + institution + kind + amount + term
        # + business type + note type.
        uu_str = item['F2'][:8] + item['F3'] + item['F5'] + item['F7'] \
                 + item['F8'] + item['F13'] + item['F12']
        uu_id = misc.get_uuid(uu_str)
        return uu_id

    def hand_error(self, failure):
        print(failure)
pass url = base_url + '&page=' + str(formdata['page']) yield scrapy.Request( url=url, dont_filter=True, callback=self.parse_id, errback=self.hand_error, meta={'data': formdata, 'header': self.headers, 'city': city}, ) def parse_id(self, response): flag = 1 city = response.meta['city'] data = response.body.decode() json_data = json.loads(data) id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail' print('当前页数:{}'.format(response.meta['data']['page'])) print('data=', response.meta['data']) try: data_list = json_data['da
identifier_body
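The commented-out block in `parse_id` above documents the pagination pattern the spider would need if rows were set below 100: carry the form data through Request.meta, bump `page`, and stop once the server returns an empty data list. A generic sketch of that loop; the spider name and URL are placeholders, not the project's:

import json
import scrapy

class PagedSpider(scrapy.Spider):
    name = 'paged_example'
    start_urls = ['http://example.com/list?page=1']

    def parse(self, response):
        payload = json.loads(response.text)
        for row in payload.get('data', []):
            yield row                      # emit items from this page
        if payload.get('data'):            # non-empty page: fetch the next one
            page = response.meta.get('page', 1) + 1
            yield scrapy.Request(
                url=f'http://example.com/list?page={page}',
                meta={'page': page},
                callback=self.parse,
            )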
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal reprentation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword, // otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty() { (query.into(), QueryType::StartWith, UsageMatcher::default()) } else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Last searching results. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// we cache the last results on Rust to allow passing the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from last searching. /// This might be a superset of searching results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created. 
ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error = ?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self, ctx: &mut Context) -> Result<()> { let cur
_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
rent_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? .get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on
identifier_body
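Switching files: the mod.rs record above caches results keyed on query supersets. `QueryInfo::is_superset` lets `on_typed` refilter the cached usages instead of re-running the search whenever the new query only tightens the previous one. The same idea reduced to a Python sketch, with illustrative data shapes:

from dataclasses import dataclass

@dataclass
class QueryInfo:
    keyword: str
    inverse_terms: frozenset = frozenset()

    def is_superset(self, other: 'QueryInfo') -> bool:
        # Cached results cover `other` when the keyword is unchanged and
        # `other` only adds exclusions on top of ours.
        return (self.keyword == other.keyword
                and self.inverse_terms <= other.inverse_terms)

cached_query = QueryInfo('foo')
cached_lines = ['foo_bar()', 'foo_baz()', 'test_foo()']

new_query = QueryInfo('foo', frozenset({'test'}))
if cached_query.is_superset(new_query):
    # Cheap path: refilter in memory instead of launching a new search.
    lines = [l for l in cached_lines
             if not any(t in l for t in new_query.inverse_terms)]
    print(lines)  # ['foo_bar()', 'foo_baz()']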
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal reprentation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword,
// otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty() { (query.into(), QueryType::StartWith, UsageMatcher::default()) } else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Last searching results. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// we cache the last results on Rust to allow passing the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from last searching. /// This might be a superset of searching results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created. ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. 
async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error = ?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self, ctx: &mut Context) -> Result<()> { let current_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? .get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. 
if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
random_line_split
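`initialize_tags` in the record above regenerates ctags and gtags in background tasks, each flipping an AtomicBool on success; `start_search` then reads both flags to pick `SearchEngine::All`, `CtagsAndRegex`, or the plain `Regex` fallback. A rough Python analogue of that flag-driven selection, using threading.Event in place of AtomicBool (engine names mirror the Rust enum, everything else is illustrative):

import threading

ctags_ready = threading.Event()
gtags_ready = threading.Event()

def pick_engine() -> str:
    if ctags_ready.is_set() and gtags_ready.is_set():
        return 'All'
    if ctags_ready.is_set():
        return 'CtagsAndRegex'
    return 'Regex'           # safe fallback while indexes are still building

print(pick_engine())         # 'Regex' before any index finishes
ctags_ready.set()            # background job finished generating tags
print(pick_engine())         # 'CtagsAndRegex'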
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal reprentation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword, // otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty()
else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Last searching results. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// we cache the last results on Rust to allow passing the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from last searching. /// This might be a superset of searching results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created. ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. 
async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error = ?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self, ctx: &mut Context) -> Result<()> { let current_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? .get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. 
if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
{ (query.into(), QueryType::StartWith, UsageMatcher::default()) }
conditional_block
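`parse_query_info` above maps filter::Query's term kinds onto a keyword plus matcher: with no fuzzy terms the raw input (or the first exact term) becomes the keyword, otherwise the fuzzy terms are re-joined as the keyword and the exact/inverse terms feed the UsageMatcher. A toy tokenizer for a comparable surface syntax; the 'x / !x prefixes for exact and inverse terms are an assumed reading, consistent with the `hel 'fn` example in the doc comment:

def parse_query(query: str):
    exact, inverse, fuzzy = [], [], []
    for tok in query.split():
        if tok.startswith("'"):
            exact.append(tok[1:])
        elif tok.startswith('!'):
            inverse.append(tok[1:])
        else:
            fuzzy.append(tok)
    keyword = ' '.join(fuzzy) if fuzzy else (exact[0] if exact else query)
    return keyword, exact, inverse

print(parse_query("hel 'fn"))   # ('hel', ['fn'], []) -- keyword + exact term
print(parse_query("'foo"))      # ('foo', ['foo'], [])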
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal reprentation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword, // otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty() { (query.into(), QueryType::StartWith, UsageMatcher::default()) } else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Last searching results. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// we cache the last results on Rust to allow passing the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from last searching. /// This might be a superset of searching results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created. 
ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error = ?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self
&mut Context) -> Result<()> { let current_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? .get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
, ctx:
identifier_name
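The triple above (prefix, suffix, held-out middle `, ctx:`, label `identifier_name`) reassembles into the signature `async fn on_move(&mut self, ctx: &mut Context) -> Result<()>`. A minimal sketch of that splice; the struct and field names are illustrative, not part of any real dataset schema:

```go
package main

import "fmt"

// fimRecord mirrors the prefix/suffix/middle layout of the records in this
// file; the names here are assumptions made for illustration only.
type fimRecord struct {
	Prefix, Middle, Suffix, FimType string
}

// reassemble restores the original source: the held-out middle is spliced
// back between the prefix and the suffix.
func reassemble(r fimRecord) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	r := fimRecord{
		Prefix:  "async fn on_move(&mut self",
		Middle:  ", ctx:",
		Suffix:  " &mut Context) -> Result<()> {",
		FimType: "identifier_name",
	}
	// Prints: async fn on_move(&mut self, ctx: &mut Context) -> Result<()> {
	fmt.Println(reassemble(r))
}
```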
service.go
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logsearch import ( "context" "io/ioutil" "net/http" "strconv" "strings" "github.com/gin-gonic/gin" "github.com/pingcap/log" "go.uber.org/fx" "go.uber.org/zap" "github.com/pingcap/tidb-dashboard/pkg/apiserver/model" "github.com/pingcap/tidb-dashboard/pkg/apiserver/user" "github.com/pingcap/tidb-dashboard/pkg/apiserver/utils" "github.com/pingcap/tidb-dashboard/pkg/config" "github.com/pingcap/tidb-dashboard/pkg/dbstore" ) type Service struct { // FIXME: Use fx.In lifecycleCtx context.Context config *config.Config logStoreDirectory string db *dbstore.DB scheduler *Scheduler } func NewService(lc fx.Lifecycle, config *config.Config, db *dbstore.DB) *Service { dir := config.TempDir if dir == "" { var err error dir, err = ioutil.TempDir("", "dashboard-logs") if err != nil { log.Fatal("Failed to create directory for storing logs", zap.Error(err)) } } err := autoMigrate(db) if err != nil { log.Fatal("Failed to initialize database", zap.Error(err)) } cleanupAllTasks(db) service := &Service{ config: config, logStoreDirectory: dir, db: db, scheduler: nil, // will be filled after scheduler is created } scheduler := NewScheduler(service) service.scheduler = scheduler lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { service.lifecycleCtx = ctx return nil }, }) return service } func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) { endpoint := r.Group("/logs") { endpoint.GET("/download", s.DownloadLogs) endpoint.Use(auth.MWAuthRequired()) { endpoint.GET("/download/acquire_token", s.GetDownloadToken) endpoint.PUT("/taskgroup", s.CreateTaskGroup) endpoint.GET("/taskgroups", s.GetAllTaskGroups) endpoint.GET("/taskgroups/:id", s.GetTaskGroup) endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview) endpoint.POST("/taskgroups/:id/retry", s.RetryTask) endpoint.POST("/taskgroups/:id/cancel", s.CancelTask) endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup) } } } type CreateTaskGroupRequest struct { Request SearchLogRequest `json:"request" binding:"required"` Targets []model.RequestTargetNode `json:"targets" binding:"required"` } type TaskGroupResponse struct { TaskGroup TaskGroupModel `json:"task_group"` Tasks []*TaskModel `json:"tasks"` } // @Summary Create and run a new log search task group // @Param request body CreateTaskGroupRequest true "Request body" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 400 {object} utils.APIError "Bad request" // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroup [put] func (s *Service) CreateTaskGroup(c *gin.Context) { var req CreateTaskGroupRequest if err := c.ShouldBindJSON(&req); err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } if len(req.Targets) == 0 { utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") return } stats := model.NewRequestTargetStatisticsFromArray(&req.Targets) taskGroup := TaskGroupModel{ SearchRequest: &req.Request, State: TaskGroupStateRunning, TargetStats: 
stats, } if err := s.db.Create(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0, len(req.Targets)) for _, t := range req.Targets { target := t task := &TaskModel{ TaskGroupID: taskGroup.ID, Target: &target, State: TaskStateRunning, } // Ignore task creation errors s.db.Create(task) tasks = append(tasks, task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID)) } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) } // @Summary List all log search task groups // @Security JwtAuth // @Success 200 {array} TaskGroupModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups [get] func (s *Service) GetAllTaskGroups(c *gin.Context) { var taskGroups []*TaskGroupModel err := s.db.Find(&taskGroups).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, taskGroups) } // @Summary List tasks in a log search task group // @Param id path string true "Task Group ID" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [get] func (s *Service) GetTaskGroup(c *gin.Context)
// @Summary Preview a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {array} PreviewModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/preview [get] func (s *Service) GetTaskGroupPreview(c *gin.Context) { taskGroupID := c.Param("id") var lines []PreviewModel err := s.db. Where("task_group_id = ?", taskGroupID). Order("time"). Limit(TaskMaxPreviewLines). Find(&lines).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, lines) } // @Summary Retry failed tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/retry [post] func (s *Service) RetryTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } // Currently we can only retry finished task group. taskGroup := TaskGroupModel{} if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0) if err := s.db.Where("task_group_id = ? AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil { _ = c.Error(err) return } if len(tasks) == 0 { // No tasks to retry c.JSON(http.StatusOK, utils.APIEmptyResponse{}) return } // Reset task status taskGroup.State = TaskGroupStateRunning s.db.Save(&taskGroup) for _, task := range tasks { task.Error = nil task.State = TaskStateRunning s.db.Save(task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID)) } c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Cancel running tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 400 {object} utils.APIError // @Router /logs/taskgroups/{id}/cancel [post] func (s *Service) CancelTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } taskGroup := TaskGroupModel{} err = s.db.First(&taskGroup, taskGroupID).Error if err != nil { _ = c.Error(err) return } if taskGroup.State != TaskGroupStateRunning { utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running") return } s.scheduler.AsyncAbort(uint(taskGroupID)) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Delete a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [delete] func (s *Service) DeleteTaskGroup(c *gin.Context) { taskGroupID := c.Param("id") taskGroup := TaskGroupModel{} err := s.db.Where("id = ? 
AND state != ?", taskGroupID, TaskGroupStateRunning).First(&taskGroup).Error if err != nil { _ = c.Error(err) return } taskGroup.Delete(s.db) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Generate a download token for downloading logs // @Produce plain // @Param id query []string false "task id" collectionFormat(csv) // @Security JwtAuth // @Success 200 {string} string "xxx" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Router /logs/download/acquire_token [get] func (s *Service) GetDownloadToken(c *gin.Context) { ids := c.QueryArray("id") str := strings.Join(ids, ",") token, err := utils.NewJWTString("logs/download", str) if err != nil { _ = c.Error(err) return } c.String(http.StatusOK, token) } // @Summary Download logs // @Produce application/x-tar,application/zip // @Param token query string true "download token" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/download [get] func (s *Service) DownloadLogs(c *gin.Context) { token := c.Query("token") str, err := utils.ParseJWTString("logs/download", token) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } ids := strings.Split(str, ",") tasks := make([]*TaskModel, 0, len(ids)) for _, id := range ids { var task TaskModel if s.db. Where("id = ? AND state = ?", id, TaskStateFinished). First(&task). Error == nil { tasks = append(tasks, &task) // Ignore errors silently } } switch len(tasks) { case 0: utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") case 1: serveTaskForDownload(tasks[0], c) default: serveMultipleTaskForDownload(tasks, c) } }
{ taskGroupID := c.Param("id") var taskGroup TaskGroupModel var tasks []*TaskModel err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error if err != nil { _ = c.Error(err) return } err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error if err != nil { _ = c.Error(err) return } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) }
identifier_body
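The held-out `identifier_body` above is the `GetTaskGroup` handler; the record mounts its endpoints under `/logs` behind JWT auth. A hedged client sketch for the `PUT /logs/taskgroup` route follows — the host, API prefix, and the empty `request`/`targets` payloads are assumptions, since `SearchLogRequest` and `model.RequestTargetNode` are defined elsewhere:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Shape follows the CreateTaskGroupRequest struct tags ("request",
	// "targets"); the inner fields are left empty on purpose.
	body := []byte(`{"request":{},"targets":[{}]}`)

	// The address below is a placeholder, not a documented endpoint.
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:12333/dashboard/api/logs/taskgroup", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <jwt>") // MWAuthRequired() guards this route

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out)) // a TaskGroupResponse JSON on 200
}
```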
service.go
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logsearch import ( "context" "io/ioutil" "net/http" "strconv" "strings" "github.com/gin-gonic/gin" "github.com/pingcap/log" "go.uber.org/fx" "go.uber.org/zap" "github.com/pingcap/tidb-dashboard/pkg/apiserver/model" "github.com/pingcap/tidb-dashboard/pkg/apiserver/user" "github.com/pingcap/tidb-dashboard/pkg/apiserver/utils" "github.com/pingcap/tidb-dashboard/pkg/config" "github.com/pingcap/tidb-dashboard/pkg/dbstore" ) type Service struct { // FIXME: Use fx.In lifecycleCtx context.Context config *config.Config logStoreDirectory string db *dbstore.DB scheduler *Scheduler } func NewService(lc fx.Lifecycle, config *config.Config, db *dbstore.DB) *Service { dir := config.TempDir if dir == "" { var err error dir, err = ioutil.TempDir("", "dashboard-logs") if err != nil { log.Fatal("Failed to create directory for storing logs", zap.Error(err)) } } err := autoMigrate(db) if err != nil { log.Fatal("Failed to initialize database", zap.Error(err)) } cleanupAllTasks(db) service := &Service{ config: config, logStoreDirectory: dir, db: db, scheduler: nil, // will be filled after scheduler is created } scheduler := NewScheduler(service) service.scheduler = scheduler lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { service.lifecycleCtx = ctx return nil }, }) return service } func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) { endpoint := r.Group("/logs") { endpoint.GET("/download", s.DownloadLogs) endpoint.Use(auth.MWAuthRequired()) { endpoint.GET("/download/acquire_token", s.GetDownloadToken) endpoint.PUT("/taskgroup", s.CreateTaskGroup) endpoint.GET("/taskgroups", s.GetAllTaskGroups) endpoint.GET("/taskgroups/:id", s.GetTaskGroup) endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview) endpoint.POST("/taskgroups/:id/retry", s.RetryTask) endpoint.POST("/taskgroups/:id/cancel", s.CancelTask) endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup) } } } type CreateTaskGroupRequest struct { Request SearchLogRequest `json:"request" binding:"required"` Targets []model.RequestTargetNode `json:"targets" binding:"required"` } type TaskGroupResponse struct { TaskGroup TaskGroupModel `json:"task_group"` Tasks []*TaskModel `json:"tasks"` } // @Summary Create and run a new log search task group // @Param request body CreateTaskGroupRequest true "Request body" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 400 {object} utils.APIError "Bad request" // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroup [put] func (s *Service) CreateTaskGroup(c *gin.Context) { var req CreateTaskGroupRequest if err := c.ShouldBindJSON(&req); err != nil
if len(req.Targets) == 0 { utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") return } stats := model.NewRequestTargetStatisticsFromArray(&req.Targets) taskGroup := TaskGroupModel{ SearchRequest: &req.Request, State: TaskGroupStateRunning, TargetStats: stats, } if err := s.db.Create(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0, len(req.Targets)) for _, t := range req.Targets { target := t task := &TaskModel{ TaskGroupID: taskGroup.ID, Target: &target, State: TaskStateRunning, } // Ignore task creation errors s.db.Create(task) tasks = append(tasks, task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID)) } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) } // @Summary List all log search task groups // @Security JwtAuth // @Success 200 {array} TaskGroupModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups [get] func (s *Service) GetAllTaskGroups(c *gin.Context) { var taskGroups []*TaskGroupModel err := s.db.Find(&taskGroups).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, taskGroups) } // @Summary List tasks in a log search task group // @Param id path string true "Task Group ID" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [get] func (s *Service) GetTaskGroup(c *gin.Context) { taskGroupID := c.Param("id") var taskGroup TaskGroupModel var tasks []*TaskModel err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error if err != nil { _ = c.Error(err) return } err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error if err != nil { _ = c.Error(err) return } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) } // @Summary Preview a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {array} PreviewModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/preview [get] func (s *Service) GetTaskGroupPreview(c *gin.Context) { taskGroupID := c.Param("id") var lines []PreviewModel err := s.db. Where("task_group_id = ?", taskGroupID). Order("time"). Limit(TaskMaxPreviewLines). Find(&lines).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, lines) } // @Summary Retry failed tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/retry [post] func (s *Service) RetryTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } // Currently we can only retry finished task group. taskGroup := TaskGroupModel{} if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0) if err := s.db.Where("task_group_id = ? 
AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil { _ = c.Error(err) return } if len(tasks) == 0 { // No tasks to retry c.JSON(http.StatusOK, utils.APIEmptyResponse{}) return } // Reset task status taskGroup.State = TaskGroupStateRunning s.db.Save(&taskGroup) for _, task := range tasks { task.Error = nil task.State = TaskStateRunning s.db.Save(task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID)) } c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Cancel running tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 400 {object} utils.APIError // @Router /logs/taskgroups/{id}/cancel [post] func (s *Service) CancelTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } taskGroup := TaskGroupModel{} err = s.db.First(&taskGroup, taskGroupID).Error if err != nil { _ = c.Error(err) return } if taskGroup.State != TaskGroupStateRunning { utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running") return } s.scheduler.AsyncAbort(uint(taskGroupID)) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Delete a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [delete] func (s *Service) DeleteTaskGroup(c *gin.Context) { taskGroupID := c.Param("id") taskGroup := TaskGroupModel{} err := s.db.Where("id = ? AND state != ?", taskGroupID, TaskGroupStateRunning).First(&taskGroup).Error if err != nil { _ = c.Error(err) return } taskGroup.Delete(s.db) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Generate a download token for downloading logs // @Produce plain // @Param id query []string false "task id" collectionFormat(csv) // @Security JwtAuth // @Success 200 {string} string "xxx" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Router /logs/download/acquire_token [get] func (s *Service) GetDownloadToken(c *gin.Context) { ids := c.QueryArray("id") str := strings.Join(ids, ",") token, err := utils.NewJWTString("logs/download", str) if err != nil { _ = c.Error(err) return } c.String(http.StatusOK, token) } // @Summary Download logs // @Produce application/x-tar,application/zip // @Param token query string true "download token" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/download [get] func (s *Service) DownloadLogs(c *gin.Context) { token := c.Query("token") str, err := utils.ParseJWTString("logs/download", token) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } ids := strings.Split(str, ",") tasks := make([]*TaskModel, 0, len(ids)) for _, id := range ids { var task TaskModel if s.db. Where("id = ? AND state = ?", id, TaskStateFinished). First(&task). 
Error == nil { tasks = append(tasks, &task) // Ignore errors silently } } switch len(tasks) { case 0: utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") case 1: serveTaskForDownload(tasks[0], c) default: serveMultipleTaskForDownload(tasks, c) } }
{ utils.MakeInvalidRequestErrorFromError(c, err) return }
conditional_block
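The `conditional_block` middle above is gin's standard bind-and-bail idiom: decode the JSON body, return a 400 on failure, then validate. A self-contained sketch of the same pattern (route and struct names are illustrative):

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

type createReq struct {
	Targets []string `json:"targets" binding:"required"`
}

func main() {
	r := gin.Default()
	r.PUT("/taskgroup", func(c *gin.Context) {
		var req createReq
		// Bind first and bail out early on malformed input, mirroring
		// CreateTaskGroup's `if err := c.ShouldBindJSON(&req); err != nil` block.
		if err := c.ShouldBindJSON(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		// Then apply semantic validation, mirroring the len(req.Targets) check.
		if len(req.Targets) == 0 {
			c.JSON(http.StatusBadRequest, gin.H{"error": "expect at least 1 target"})
			return
		}
		c.JSON(http.StatusOK, gin.H{"targets": len(req.Targets)})
	})
	_ = r.Run(":8080")
}
```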
service.go
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logsearch import ( "context" "io/ioutil" "net/http" "strconv" "strings" "github.com/gin-gonic/gin" "github.com/pingcap/log" "go.uber.org/fx" "go.uber.org/zap" "github.com/pingcap/tidb-dashboard/pkg/apiserver/model" "github.com/pingcap/tidb-dashboard/pkg/apiserver/user" "github.com/pingcap/tidb-dashboard/pkg/apiserver/utils" "github.com/pingcap/tidb-dashboard/pkg/config" "github.com/pingcap/tidb-dashboard/pkg/dbstore" ) type Service struct { // FIXME: Use fx.In lifecycleCtx context.Context config *config.Config logStoreDirectory string db *dbstore.DB scheduler *Scheduler } func NewService(lc fx.Lifecycle, config *config.Config, db *dbstore.DB) *Service { dir := config.TempDir if dir == "" { var err error dir, err = ioutil.TempDir("", "dashboard-logs") if err != nil { log.Fatal("Failed to create directory for storing logs", zap.Error(err)) } } err := autoMigrate(db) if err != nil { log.Fatal("Failed to initialize database", zap.Error(err)) } cleanupAllTasks(db) service := &Service{ config: config, logStoreDirectory: dir, db: db, scheduler: nil, // will be filled after scheduler is created } scheduler := NewScheduler(service) service.scheduler = scheduler lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { service.lifecycleCtx = ctx return nil }, }) return service } func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) { endpoint := r.Group("/logs") { endpoint.GET("/download", s.DownloadLogs) endpoint.Use(auth.MWAuthRequired()) { endpoint.GET("/download/acquire_token", s.GetDownloadToken) endpoint.PUT("/taskgroup", s.CreateTaskGroup) endpoint.GET("/taskgroups", s.GetAllTaskGroups) endpoint.GET("/taskgroups/:id", s.GetTaskGroup) endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview) endpoint.POST("/taskgroups/:id/retry", s.RetryTask) endpoint.POST("/taskgroups/:id/cancel", s.CancelTask) endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup) } } } type CreateTaskGroupRequest struct { Request SearchLogRequest `json:"request" binding:"required"` Targets []model.RequestTargetNode `json:"targets" binding:"required"` } type TaskGroupResponse struct { TaskGroup TaskGroupModel `json:"task_group"` Tasks []*TaskModel `json:"tasks"` } // @Summary Create and run a new log search task group // @Param request body CreateTaskGroupRequest true "Request body" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 400 {object} utils.APIError "Bad request" // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroup [put] func (s *Service) CreateTaskGroup(c *gin.Context) { var req CreateTaskGroupRequest if err := c.ShouldBindJSON(&req); err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } if len(req.Targets) == 0 { utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") return } stats := model.NewRequestTargetStatisticsFromArray(&req.Targets) taskGroup := TaskGroupModel{ SearchRequest: &req.Request, State: TaskGroupStateRunning, TargetStats: 
stats, } if err := s.db.Create(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0, len(req.Targets)) for _, t := range req.Targets { target := t task := &TaskModel{ TaskGroupID: taskGroup.ID, Target: &target, State: TaskStateRunning, } // Ignore task creation errors s.db.Create(task) tasks = append(tasks, task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID)) } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) } // @Summary List all log search task groups // @Security JwtAuth // @Success 200 {array} TaskGroupModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups [get] func (s *Service) GetAllTaskGroups(c *gin.Context) { var taskGroups []*TaskGroupModel err := s.db.Find(&taskGroups).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, taskGroups) } // @Summary List tasks in a log search task group // @Param id path string true "Task Group ID" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [get] func (s *Service) GetTaskGroup(c *gin.Context) { taskGroupID := c.Param("id") var taskGroup TaskGroupModel var tasks []*TaskModel err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error if err != nil { _ = c.Error(err) return } err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error if err != nil { _ = c.Error(err) return } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) } // @Summary Preview a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {array} PreviewModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/preview [get] func (s *Service)
(c *gin.Context) { taskGroupID := c.Param("id") var lines []PreviewModel err := s.db. Where("task_group_id = ?", taskGroupID). Order("time"). Limit(TaskMaxPreviewLines). Find(&lines).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, lines) } // @Summary Retry failed tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/retry [post] func (s *Service) RetryTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } // Currently we can only retry finished task group. taskGroup := TaskGroupModel{} if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0) if err := s.db.Where("task_group_id = ? AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil { _ = c.Error(err) return } if len(tasks) == 0 { // No tasks to retry c.JSON(http.StatusOK, utils.APIEmptyResponse{}) return } // Reset task status taskGroup.State = TaskGroupStateRunning s.db.Save(&taskGroup) for _, task := range tasks { task.Error = nil task.State = TaskStateRunning s.db.Save(task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID)) } c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Cancel running tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 400 {object} utils.APIError // @Router /logs/taskgroups/{id}/cancel [post] func (s *Service) CancelTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } taskGroup := TaskGroupModel{} err = s.db.First(&taskGroup, taskGroupID).Error if err != nil { _ = c.Error(err) return } if taskGroup.State != TaskGroupStateRunning { utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running") return } s.scheduler.AsyncAbort(uint(taskGroupID)) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Delete a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [delete] func (s *Service) DeleteTaskGroup(c *gin.Context) { taskGroupID := c.Param("id") taskGroup := TaskGroupModel{} err := s.db.Where("id = ? 
AND state != ?", taskGroupID, TaskGroupStateRunning).First(&taskGroup).Error if err != nil { _ = c.Error(err) return } taskGroup.Delete(s.db) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Generate a download token for downloading logs // @Produce plain // @Param id query []string false "task id" collectionFormat(csv) // @Security JwtAuth // @Success 200 {string} string "xxx" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Router /logs/download/acquire_token [get] func (s *Service) GetDownloadToken(c *gin.Context) { ids := c.QueryArray("id") str := strings.Join(ids, ",") token, err := utils.NewJWTString("logs/download", str) if err != nil { _ = c.Error(err) return } c.String(http.StatusOK, token) } // @Summary Download logs // @Produce application/x-tar,application/zip // @Param token query string true "download token" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/download [get] func (s *Service) DownloadLogs(c *gin.Context) { token := c.Query("token") str, err := utils.ParseJWTString("logs/download", token) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } ids := strings.Split(str, ",") tasks := make([]*TaskModel, 0, len(ids)) for _, id := range ids { var task TaskModel if s.db. Where("id = ? AND state = ?", id, TaskStateFinished). First(&task). Error == nil { tasks = append(tasks, &task) // Ignore errors silently } } switch len(tasks) { case 0: utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") case 1: serveTaskForDownload(tasks[0], c) default: serveMultipleTaskForDownload(tasks, c) } }
GetTaskGroupPreview
identifier_name
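The held-out identifier above is `GetTaskGroupPreview`, whose body is a plain ORM chain: filter by task group, order by time, cap at `TaskMaxPreviewLines`. A standalone sketch of the same query shape, written against GORM v2 rather than the wrapped `dbstore.DB` the service uses (model, in-memory DSN, and limit are illustrative):

```go
package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type PreviewLine struct {
	ID          uint
	TaskGroupID uint
	Time        int64
	Line        string
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = db.AutoMigrate(&PreviewLine{})

	var lines []PreviewLine
	// Same shape as GetTaskGroupPreview: Where -> Order -> Limit -> Find.
	err = db.
		Where("task_group_id = ?", 1).
		Order("time").
		Limit(500). // stands in for TaskMaxPreviewLines
		Find(&lines).Error
	fmt.Println(len(lines), err)
}
```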
service.go
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logsearch import ( "context" "io/ioutil" "net/http" "strconv" "strings" "github.com/gin-gonic/gin" "github.com/pingcap/log" "go.uber.org/fx" "go.uber.org/zap" "github.com/pingcap/tidb-dashboard/pkg/apiserver/model" "github.com/pingcap/tidb-dashboard/pkg/apiserver/user" "github.com/pingcap/tidb-dashboard/pkg/apiserver/utils" "github.com/pingcap/tidb-dashboard/pkg/config" "github.com/pingcap/tidb-dashboard/pkg/dbstore" ) type Service struct { // FIXME: Use fx.In lifecycleCtx context.Context config *config.Config logStoreDirectory string db *dbstore.DB scheduler *Scheduler } func NewService(lc fx.Lifecycle, config *config.Config, db *dbstore.DB) *Service { dir := config.TempDir if dir == "" { var err error dir, err = ioutil.TempDir("", "dashboard-logs") if err != nil { log.Fatal("Failed to create directory for storing logs", zap.Error(err)) } } err := autoMigrate(db) if err != nil { log.Fatal("Failed to initialize database", zap.Error(err)) } cleanupAllTasks(db) service := &Service{ config: config, logStoreDirectory: dir, db: db, scheduler: nil, // will be filled after scheduler is created } scheduler := NewScheduler(service) service.scheduler = scheduler lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { service.lifecycleCtx = ctx return nil }, }) return service } func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) { endpoint := r.Group("/logs") { endpoint.GET("/download", s.DownloadLogs) endpoint.Use(auth.MWAuthRequired()) { endpoint.GET("/download/acquire_token", s.GetDownloadToken) endpoint.PUT("/taskgroup", s.CreateTaskGroup) endpoint.GET("/taskgroups", s.GetAllTaskGroups) endpoint.GET("/taskgroups/:id", s.GetTaskGroup) endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview) endpoint.POST("/taskgroups/:id/retry", s.RetryTask) endpoint.POST("/taskgroups/:id/cancel", s.CancelTask) endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup) } } } type CreateTaskGroupRequest struct { Request SearchLogRequest `json:"request" binding:"required"` Targets []model.RequestTargetNode `json:"targets" binding:"required"` } type TaskGroupResponse struct { TaskGroup TaskGroupModel `json:"task_group"` Tasks []*TaskModel `json:"tasks"` } // @Summary Create and run a new log search task group // @Param request body CreateTaskGroupRequest true "Request body" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 400 {object} utils.APIError "Bad request" // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroup [put] func (s *Service) CreateTaskGroup(c *gin.Context) { var req CreateTaskGroupRequest if err := c.ShouldBindJSON(&req); err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } if len(req.Targets) == 0 { utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") return } stats := model.NewRequestTargetStatisticsFromArray(&req.Targets) taskGroup := TaskGroupModel{ SearchRequest: &req.Request, State: TaskGroupStateRunning, TargetStats: 
stats, } if err := s.db.Create(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0, len(req.Targets)) for _, t := range req.Targets { target := t task := &TaskModel{ TaskGroupID: taskGroup.ID, Target: &target, State: TaskStateRunning, } // Ignore task creation errors s.db.Create(task) tasks = append(tasks, task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID)) } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) } // @Summary List all log search task groups // @Security JwtAuth // @Success 200 {array} TaskGroupModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups [get] func (s *Service) GetAllTaskGroups(c *gin.Context) { var taskGroups []*TaskGroupModel err := s.db.Find(&taskGroups).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, taskGroups) } // @Summary List tasks in a log search task group // @Param id path string true "Task Group ID" // @Security JwtAuth // @Success 200 {object} TaskGroupResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [get] func (s *Service) GetTaskGroup(c *gin.Context) { taskGroupID := c.Param("id") var taskGroup TaskGroupModel var tasks []*TaskModel err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error if err != nil { _ = c.Error(err) return } err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error if err != nil { _ = c.Error(err) return } resp := TaskGroupResponse{ TaskGroup: taskGroup, Tasks: tasks, } c.JSON(http.StatusOK, resp) } // @Summary Preview a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {array} PreviewModel // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/preview [get] func (s *Service) GetTaskGroupPreview(c *gin.Context) { taskGroupID := c.Param("id") var lines []PreviewModel err := s.db. Where("task_group_id = ?", taskGroupID). Order("time"). Limit(TaskMaxPreviewLines). Find(&lines).Error if err != nil { _ = c.Error(err) return } c.JSON(http.StatusOK, lines) } // @Summary Retry failed tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id}/retry [post] func (s *Service) RetryTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } // Currently we can only retry finished task group. taskGroup := TaskGroupModel{} if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil { _ = c.Error(err) return } tasks := make([]*TaskModel, 0) if err := s.db.Where("task_group_id = ? 
AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil { _ = c.Error(err) return } if len(tasks) == 0 { // No tasks to retry c.JSON(http.StatusOK, utils.APIEmptyResponse{}) return } // Reset task status taskGroup.State = TaskGroupStateRunning s.db.Save(&taskGroup) for _, task := range tasks { task.Error = nil task.State = TaskStateRunning s.db.Save(task) } if !s.scheduler.AsyncStart(&taskGroup, tasks) { log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID)) } c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Cancel running tasks in a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 400 {object} utils.APIError // @Router /logs/taskgroups/{id}/cancel [post] func (s *Service) CancelTask(c *gin.Context) { taskGroupID, err := strconv.Atoi(c.Param("id")) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } taskGroup := TaskGroupModel{} err = s.db.First(&taskGroup, taskGroupID).Error if err != nil { _ = c.Error(err) return } if taskGroup.State != TaskGroupStateRunning { utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running") return } s.scheduler.AsyncAbort(uint(taskGroupID)) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Delete a log search task group // @Param id path string true "task group id" // @Security JwtAuth // @Success 200 {object} utils.APIEmptyResponse // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/taskgroups/{id} [delete] func (s *Service) DeleteTaskGroup(c *gin.Context) {
taskGroup := TaskGroupModel{} err := s.db.Where("id = ? AND state != ?", taskGroupID, TaskGroupStateRunning).First(&taskGroup).Error if err != nil { _ = c.Error(err) return } taskGroup.Delete(s.db) c.JSON(http.StatusOK, utils.APIEmptyResponse{}) } // @Summary Generate a download token for downloading logs // @Produce plain // @Param id query []string false "task id" collectionFormat(csv) // @Security JwtAuth // @Success 200 {string} string "xxx" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Router /logs/download/acquire_token [get] func (s *Service) GetDownloadToken(c *gin.Context) { ids := c.QueryArray("id") str := strings.Join(ids, ",") token, err := utils.NewJWTString("logs/download", str) if err != nil { _ = c.Error(err) return } c.String(http.StatusOK, token) } // @Summary Download logs // @Produce application/x-tar,application/zip // @Param token query string true "download token" // @Failure 400 {object} utils.APIError // @Failure 401 {object} utils.APIError "Unauthorized failure" // @Failure 500 {object} utils.APIError // @Router /logs/download [get] func (s *Service) DownloadLogs(c *gin.Context) { token := c.Query("token") str, err := utils.ParseJWTString("logs/download", token) if err != nil { utils.MakeInvalidRequestErrorFromError(c, err) return } ids := strings.Split(str, ",") tasks := make([]*TaskModel, 0, len(ids)) for _, id := range ids { var task TaskModel if s.db. Where("id = ? AND state = ?", id, TaskStateFinished). First(&task). Error == nil { tasks = append(tasks, &task) // Ignore errors silently } } switch len(tasks) { case 0: utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target") case 1: serveTaskForDownload(tasks[0], c) default: serveMultipleTaskForDownload(tasks, c) } }
taskGroupID := c.Param("id")
random_line_split
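The record above also shows the download flow: `GetDownloadToken` signs the comma-joined task IDs into a token, and `DownloadLogs` later verifies it before serving files. A hedged sketch of that issue-then-verify round trip using `github.com/golang-jwt/jwt/v5` — the claim names, expiry, and signing key are assumptions, as the real logic lives inside `utils.NewJWTString`/`utils.ParseJWTString`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

var key = []byte("demo-secret") // the real service derives its own signing key

// issue signs the payload together with a purpose string ("logs/download").
func issue(purpose, data string) (string, error) {
	t := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"p":   purpose,
		"d":   data,
		"exp": time.Now().Add(5 * time.Minute).Unix(),
	})
	return t.SignedString(key)
}

// verify parses the token and rejects purpose mismatches; production code
// should also pin the expected signing method inside the keyfunc.
func verify(purpose, token string) (string, error) {
	parsed, err := jwt.Parse(token, func(*jwt.Token) (interface{}, error) { return key, nil })
	if err != nil || !parsed.Valid {
		return "", fmt.Errorf("invalid token: %v", err)
	}
	claims := parsed.Claims.(jwt.MapClaims)
	if claims["p"] != purpose {
		return "", fmt.Errorf("token issued for %v, not %v", claims["p"], purpose)
	}
	return claims["d"].(string), nil
}

func main() {
	tok, _ := issue("logs/download", "1,2,3")
	ids, err := verify("logs/download", tok)
	fmt.Println(ids, err) // 1,2,3 <nil>
}
```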
create.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package manager import ( "fmt" "strings" "time" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubeadm/kinder/pkg/cluster/status" "k8s.io/kubeadm/kinder/pkg/constants" "k8s.io/kubeadm/kinder/pkg/cri/host" "k8s.io/kubeadm/kinder/pkg/cri/nodes" "k8s.io/kubeadm/kinder/pkg/exec" ) // CreateOptions holds all the options used at create time type CreateOptions struct { controlPlanes int workers int image string externalLoadBalancer bool externalEtcd bool retain bool volumes []string } // CreateOption is a configuration option supplied to Create type CreateOption func(*CreateOptions) // ControlPlanes sets the number of control plane nodes for create func ControlPlanes(controlPlanes int) CreateOption { return func(c *CreateOptions) { c.controlPlanes = controlPlanes } } // Workers sets the number of worker nodes for create func Workers(workers int) CreateOption { return func(c *CreateOptions) { c.workers = workers } } // Image sets the image for create func Image(image string) CreateOption { return func(c *CreateOptions) { c.image = image } } // ExternalEtcd instructs create to add an external etcd to the cluster func ExternalEtcd(externalEtcd bool) CreateOption { return func(c *CreateOptions) { c.externalEtcd = externalEtcd } } // ExternalLoadBalancer instructs create to add an external loadbalancer to the cluster. // NB. 
this happens automatically when there is more than one control plane instance, but with this flag // it is possible to override the default behaviour func ExternalLoadBalancer(externalLoadBalancer bool) CreateOption { return func(c *CreateOptions) { c.externalLoadBalancer = externalLoadBalancer } } // Retain option instructs create cluster to preserve nodes in case of errors for debugging purposes func Retain(retain bool) CreateOption { return func(c *CreateOptions) { c.retain = retain } } // Volumes option instructs create cluster to add volumes to the node containers func Volumes(volumes []string) CreateOption { return func(c *CreateOptions) { c.volumes = volumes } } // CreateCluster creates a new kinder cluster func CreateCluster(clusterName string, options ...CreateOption) error { flags := &CreateOptions{} for _, o := range options { o(flags) } // Check if the cluster name already exists known, err := status.IsKnown(clusterName) if err != nil { return err } if known { return errors.Errorf("a cluster with the name %q already exists", clusterName) } fmt.Printf("Creating cluster %q ...\n", clusterName) // attempt to explicitly pull the required node image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls ensureNodeImage(flags.image) handleErr := func(err error) error { // In case of errors nodes are deleted (except if retain is explicitly set) if !flags.retain { if c, err := status.FromDocker(clusterName); err != nil { log.Error(err) } else { for _, n := range c.AllNodes() { if err := exec.NewHostCmd( "docker", "rm", "-f", // force the container to be deleted now "-v", // delete volumes n.Name(), ).Run(); err != nil { return errors.Wrapf(err, "failed to delete node %s", n.Name()) } } } } log.Error(err) return err } // Create node containers as defined in the kind config if err := createNodes( clusterName, flags, ); err != nil { return handleErr(err) } fmt.Println() fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n") fmt.Printf("kinder do, the kinder swiss knife 🚀!\n") return nil } func createNodes(clusterName string, flags *CreateOptions) error { // compute the desired nodes, and inform the user that we are setting them up desiredNodes := nodesToCreate(clusterName, flags) numberOfNodes := len(desiredNodes) if flags.externalEtcd { numberOfNodes++ } fmt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes)) // detect CRI runtime installed into images before actually creating nodes runtime, err := status.InspectCRIinImage(flags.image) if err != nil { log.Errorf("Error detecting CRI for image %s! %v", flags.image, err) return err } log.Infof("Detected %s container runtime for image %s", runtime, flags.image) createHelper, err := nodes.NewCreateHelper(runtime) if err != nil { log.Errorf("Error creating NewCreateHelper for image %s! 
%v", flags.image, err) return err } // create all of the node containers, concurrently fns := []func() error{} for _, desiredNode := range desiredNodes { desiredNode := desiredNode // capture loop variable fns = append(fns, func() error { switch desiredNode.Role { case constants.ExternalLoadBalancerNodeRoleValue: return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name) case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue: return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes) default: return nil } }) } log.Info("Creating nodes...") if err := untilError(fns); err != nil { return err } // add an external etcd if explicitly requested if flags.externalEtcd { log.Info("Getting required etcd image...") c, err := status.FromDocker(clusterName) if err != nil { return err } etcdImage, err := c.BootstrapControlPlane().EtcdImage() if err != nil { return err } // attempt to explicitly pull the etcdImage if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(etcdImage, 4) log.Info("Creating external etcd...") if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil { return err } } // wait for all node containers to have a Running status log.Info("Waiting for all nodes to start...") timeout := time.Second * 40 for _, n := range desiredNodes { var lastErr error log.Infof("Waiting for node %s to start...", n.Name) err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) { lines, err := exec.NewHostCmd( "docker", "container", "inspect", "-f", "'{{.State.Running}}'", n.Name, ).RunAndCapture() if err == nil && len(lines) > 0 && lines[0] == `'true'` { return true, nil } lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines) return false, nil }) if err != nil { return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout) } } // get the cluster c, err := status.FromDocker(clusterName) if err != nil { return err } c.Settings = &status.ClusterSettings{ IPFamily: status.IPv4Family, // only IPv4 is tested with kinder } // TODO: the cluster and node settings are currently unused by kinder // Enable these writes if settings have to stored on the nodes // // // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle. // // if err := c.WriteSettings(); err != nil { // return err // } // // for _, n := range c.K8sNodes() { // if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil { // return err // } // } return nil } // nodeSpec describes a node to create purely from the container aspect // this does not include eg starting kubernetes (see actions for that) type nodeSpec struct { Name string Role string } // nodesToCreate return the list of nodes to create for the cluster func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec { var desiredNodes []nodeSpec // prepare nodes explicitly for n := 0; n < flags.controlPlanes; n++ { role := constants.ControlPlaneNodeRoleValue desiredNode := nodeSpec{
desiredNodes = append(desiredNodes, desiredNode) } for n := 0; n < flags.workers; n++ { role := constants.WorkerNodeRoleValue desiredNode := nodeSpec{ Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, } desiredNodes = append(desiredNodes, desiredNode) } // add an external load balancer if explicitly requested or if there are multiple control planes if flags.externalLoadBalancer || flags.controlPlanes > 1 { role := constants.ExternalLoadBalancerNodeRoleValue desiredNodes = append(desiredNodes, nodeSpec{ Name: fmt.Sprintf("%s-lb", clusterName), Role: role, }) } return desiredNodes } // ensureNodeImage ensures that the node image used by the create is present func ensureNodeImage(image string) { fmt.Printf("Ensuring node image (%s) 🖼\n", image) // attempt to explicitly pull the image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(image, 4) } // UntilError runs all funcs in separate goroutines, returning the // first non-nil error returned from funcs, or nil if all funcs return nil // Nb. this func was originally imported from "sigs.k8s.io/kind/pkg/concurrent"; it is still available // in the kind codebase, but it has been slightly refactored. func untilError(funcs []func() error) error { errCh := make(chan error, len(funcs)) for _, f := range funcs { f := f // capture f go func() { errCh <- f() }() } for i := 0; i < len(funcs); i++ { if err := <-errCh; err != nil { return err } } return nil }
Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, }
random_line_split
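The `nodeSpec` middle above belongs to `nodesToCreate`, which derives deterministic container names from the cluster name, role, and a 1-based index. A tiny sketch of that naming scheme; the literal role strings stand in for the `constants.*NodeRoleValue` values:

```go
package main

import "fmt"

// nodeNames mirrors the fmt.Sprintf calls in nodesToCreate.
func nodeNames(cluster string, controlPlanes, workers int, externalLB bool) []string {
	var names []string
	for n := 0; n < controlPlanes; n++ {
		names = append(names, fmt.Sprintf("%s-control-plane-%d", cluster, n+1))
	}
	for n := 0; n < workers; n++ {
		names = append(names, fmt.Sprintf("%s-worker-%d", cluster, n+1))
	}
	// An external load balancer is added when requested, or automatically
	// when there is more than one control plane (the `controlPlanes > 1` check).
	if externalLB || controlPlanes > 1 {
		names = append(names, fmt.Sprintf("%s-lb", cluster))
	}
	return names
}

func main() {
	fmt.Println(nodeNames("kinder-test", 3, 2, false))
	// [kinder-test-control-plane-1 kinder-test-control-plane-2
	//  kinder-test-control-plane-3 kinder-test-worker-1 kinder-test-worker-2
	//  kinder-test-lb]
}
```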
create.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package manager import ( "fmt" "strings" "time" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubeadm/kinder/pkg/cluster/status" "k8s.io/kubeadm/kinder/pkg/constants" "k8s.io/kubeadm/kinder/pkg/cri/host" "k8s.io/kubeadm/kinder/pkg/cri/nodes" "k8s.io/kubeadm/kinder/pkg/exec" ) // CreateOptions holds all the options used at create time type CreateOptions struct { controlPlanes int workers int image string externalLoadBalancer bool externalEtcd bool retain bool volumes []string } // CreateOption is a configuration option supplied to Create type CreateOption func(*CreateOptions) // ControlPlanes sets the number of control plane nodes for create func ControlPlanes(controlPlanes int) CreateOption { return func(c *CreateOptions) { c.controlPlanes = controlPlanes } } // Workers sets the number of worker nodes for create func Workers(workers int) CreateOption { return func(c *CreateOptions) { c.workers = workers } } // Image sets the image for create func Image(image string) CreateOption { return func(c *CreateOptions) { c.image = image } } // ExternalEtcd instructs create to add an external etcd to the cluster func ExternalEtcd(externalEtcd bool) CreateOption { return func(c *CreateOptions) { c.externalEtcd = externalEtcd } } // ExternalLoadBalancer instructs create to add an external loadbalancer to the cluster. // NB. 
this happens automatically when there is more than one control plane instance, but with this flag // it is possible to override the default behaviour func ExternalLoadBalancer(externalLoadBalancer bool) CreateOption { return func(c *CreateOptions) { c.externalLoadBalancer = externalLoadBalancer } } // Retain option instructs create cluster to preserve nodes in case of errors for debugging purposes func Retain(retain bool) CreateOption { return func(c *CreateOptions) { c.retain = retain } } // Volumes option instructs create cluster to add volumes to the node containers func Volumes(volumes []string) CreateOption { return func(c *CreateOptions) { c.volumes = volumes } } // CreateCluster creates a new kinder cluster func CreateCluster(clusterName string, options ...CreateOption) error { flags := &CreateOptions{} for _, o := range options { o(flags) } // Check if the cluster name already exists known, err := status.IsKnown(clusterName) if err != nil { return err } if known { return errors.Errorf("a cluster with the name %q already exists", clusterName) } fmt.Printf("Creating cluster %q ...\n", clusterName) // attempt to explicitly pull the required node image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls ensureNodeImage(flags.image) handleErr := func(err error) error { // In case of errors nodes are deleted (except if retain is explicitly set) if !flags.retain { if c, err := status.FromDocker(clusterName); err != nil { log.Error(err) } else { for _, n := range c.AllNodes() { if err := exec.NewHostCmd( "docker", "rm", "-f", // force the container to be deleted now "-v", // delete volumes n.Name(), ).Run(); err != nil { return errors.Wrapf(err, "failed to delete node %s", n.Name()) } } } } log.Error(err) return err } // Create node containers as defined in the kind config if err := createNodes( clusterName, flags, ); err != nil { return handleErr(err) } fmt.Println() fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n") fmt.Printf("kinder do, the kinder swiss knife 🚀!\n") return nil } func createNodes(clusterName string, flags *CreateOptions) error { // compute the desired nodes, and inform the user that we are setting them up desiredNodes := nodesToCreate(clusterName, flags) numberOfNodes := len(desiredNodes) if flags.externalEtcd { numberOfNodes++ } fmt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes)) // detect CRI runtime installed into images before actually creating nodes runtime, err := status.InspectCRIinImage(flags.image) if err != nil { log.Errorf("Error detecting CRI for image %s! %v", flags.image, err) return err } log.Infof("Detected %s container runtime for image %s", runtime, flags.image) createHelper, err := nodes.NewCreateHelper(runtime) if err != nil { log.Errorf("Error creating NewCreateHelper for image %s! 
%v", flags.image, err) return err } // create all of the node containers, concurrently fns := []func() error{} for _, desiredNode := range desiredNodes { desiredNode := desiredNode // capture loop variable fns = append(fns, func() error { switch desiredNode.Role { case constants.ExternalLoadBalancerNodeRoleValue: return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name) case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue: return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes) default: return nil } }) } log.Info("Creating nodes...") if err := untilError(fns); err != nil { return err } // add an external etcd if explicitly requested if flags.externalEtcd { log.Info("Getting required etcd image...") c, err := status.FromDocker(clusterName) if err != nil { return err } etcdImage, err := c.BootstrapControlPlane().EtcdImage() if err != nil { return err } // attempt to explicitly pull the etcdImage if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(etcdImage, 4) log.Info("Creating external etcd...") if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil { return err } } // wait for all node containers to have a Running status log.Info("Waiting for all nodes to start...") timeout := time.Second * 40 for _, n := range desiredNodes { var lastErr error log.Infof("Waiting for node %s to start...", n.Name) err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) { lines, err := exec.NewHostCmd( "docker", "container", "inspect", "-f", "'{{.State.Running}}'", n.Name, ).RunAndCapture() if err == nil && len(lines) > 0 && lines[0] == `'true'` { return true, nil } lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines) return false, nil }) if err != nil { return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout) } } // get the cluster c, err := status.FromDocker(clusterName) if err != nil { return err } c.Settings = &status.ClusterSettings{ IPFamily: status.IPv4Family, // only IPv4 is tested with kinder } // TODO: the cluster and node settings are currently unused by kinder // Enable these writes if settings have to stored on the nodes // // // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle. 
// // if err := c.WriteSettings(); err != nil { // return err // } // // for _, n := range c.K8sNodes() { // if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil { // return err // } // } return nil } // nodeSpec describes a node to create purely from the container aspect // this does not include e.g. starting kubernetes (see actions for that) type nodeSpec struct { Name string Role string } // nodesToCreate returns the list of nodes to create for the cluster func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec { var desiredNodes []nodeSpec // prepare nodes explicitly for n := 0; n < flags.controlPlanes; n++ { role := constants.ControlPlaneNodeRoleValue desiredNode := nodeSpec{ Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, } desiredNodes = append(desiredNodes, desiredNode) } for n := 0; n < flags.workers; n++ { role := constants.WorkerNodeRoleValue desiredNode := nodeSpec{ Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, } desiredNodes = append(desiredNodes, desiredNode) } // add an external load balancer if explicitly requested or if there are multiple control planes if flags.externalLoadBalancer || flags.controlPlanes > 1 { role := constants.ExternalLoadBalancerNodeRoleValue desiredNodes = append(desiredNodes, nodeSpec{ Name: fmt.Sprintf("%s-lb", clusterName), Role: role, }) } return desiredNodes } // ensureNodeImage ensures that the node image used by the create is present func ensureNodeImage(image string) { fmt.Printf("Ensuring node image (%s) 🖼\n", image) // attempt to explicitly pull the image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(image, 4) } // untilError runs all funcs in separate goroutines, returning the // first non-nil error returned from funcs, or nil if all funcs return nil // NB. this func was originally imported from "sigs.k8s.io/kind/pkg/concurrent"; it is still available // in the kind codebase, but it has been slightly refactored. func untilError(funcs []func() error) error { errCh
:= make(chan error, len(funcs)) for _, f := range funcs { f := f // capture f go func() { errCh <- f() }() } for i := 0; i < len(funcs); i++ { if err := <-errCh; err != nil { return err } } return nil }
identifier_body
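The create.go rows above are built on Go's functional options pattern: each exported option (ControlPlanes, Workers, Image, Retain, ...) returns a closure that mutates a private CreateOptions struct, and CreateCluster folds the closures over a defaults struct. What follows is a minimal, self-contained sketch of the same pattern; the names (options, withControlPlanes, newCluster) are illustrative stand-ins, not kinder's API.

package main

import "fmt"

// options holds settings assembled by functional options
// (mirrors the CreateOptions/CreateOption pair above; names are hypothetical).
type options struct {
	controlPlanes int
	workers       int
}

// option mutates an options struct, like kinder's CreateOption.
type option func(*options)

func withControlPlanes(n int) option { return func(o *options) { o.controlPlanes = n } }
func withWorkers(n int) option       { return func(o *options) { o.workers = n } }

func newCluster(name string, opts ...option) {
	cfg := &options{controlPlanes: 1} // defaults live in one place
	for _, o := range opts {
		o(cfg) // each option overwrites just the field it cares about
	}
	fmt.Printf("cluster %q: %d control plane(s), %d worker(s)\n",
		name, cfg.controlPlanes, cfg.workers)
}

func main() {
	newCluster("demo", withControlPlanes(3), withWorkers(2))
}

The payoff of this shape is that new settings can be added later without breaking existing call sites, since callers pass only the options they care about.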
create.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package manager import ( "fmt" "strings" "time" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubeadm/kinder/pkg/cluster/status" "k8s.io/kubeadm/kinder/pkg/constants" "k8s.io/kubeadm/kinder/pkg/cri/host" "k8s.io/kubeadm/kinder/pkg/cri/nodes" "k8s.io/kubeadm/kinder/pkg/exec" ) // CreateOptions holds all the options used at create time type CreateOptions struct { controlPlanes int workers int image string externalLoadBalancer bool externalEtcd bool retain bool volumes []string } // CreateOption is a configuration option supplied to Create type CreateOption func(*CreateOptions) // ControlPlanes sets the number of control plane nodes for create func ControlPlanes(controlPlanes int) CreateOption { return func(c *CreateOptions) { c.controlPlanes = controlPlanes } } // Workers sets the number of worker nodes for create func Workers(workers int) CreateOption { return func(c *CreateOptions) { c.workers = workers } } // Image sets the image for create func Image(image string) CreateOption { return func(c *CreateOptions) { c.image = image } } // ExternalEtcd instructs create to add an external etcd to the cluster func ExternalEtcd(externalEtcd bool) CreateOption { return func(c *CreateOptions) { c.externalEtcd = externalEtcd } } // ExternalLoadBalancer instructs create to add an external loadbalancer to the cluster. // NB. this happens automatically when there is more than one control plane instance, but with this flag // it is possible to override the default behaviour func ExternalLoadBalancer(externalLoadBalancer bool) CreateOption { return func(c *CreateOptions) { c.externalLoadBalancer = externalLoadBalancer } } // Retain option instructs create cluster to preserve nodes in case of errors for debugging purposes func Retain(retain bool) CreateOption { return func(c *CreateOptions) { c.retain = retain } } // Volumes option instructs create cluster to add volumes to the node containers func
(volumes []string) CreateOption { return func(c *CreateOptions) { c.volumes = volumes } } // CreateCluster creates a new kinder cluster func CreateCluster(clusterName string, options ...CreateOption) error { flags := &CreateOptions{} for _, o := range options { o(flags) } // Check if the cluster name already exists known, err := status.IsKnown(clusterName) if err != nil { return err } if known { return errors.Errorf("a cluster with the name %q already exists", clusterName) } fmt.Printf("Creating cluster %q ...\n", clusterName) // attempt to explicitly pull the required node image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls ensureNodeImage(flags.image) handleErr := func(err error) error { // In case of errors nodes are deleted (except if retain is explicitly set) if !flags.retain { if c, err := status.FromDocker(clusterName); err != nil { log.Error(err) } else { for _, n := range c.AllNodes() { if err := exec.NewHostCmd( "docker", "rm", "-f", // force the container to be deleted now "-v", // delete volumes n.Name(), ).Run(); err != nil { return errors.Wrapf(err, "failed to delete node %s", n.Name()) } } } } log.Error(err) return err } // Create node containers as defined in the kind config if err := createNodes( clusterName, flags, ); err != nil { return handleErr(err) } fmt.Println() fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n") fmt.Printf("kinder do, the kinder swiss knife 🚀!\n") return nil } func createNodes(clusterName string, flags *CreateOptions) error { // compute the desired nodes, and inform the user that we are setting them up desiredNodes := nodesToCreate(clusterName, flags) numberOfNodes := len(desiredNodes) if flags.externalEtcd { numberOfNodes++ } fmt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes)) // detect CRI runtime installed into images before actually creating nodes runtime, err := status.InspectCRIinImage(flags.image) if err != nil { log.Errorf("Error detecting CRI for images %s! %v", flags.image, err) return err } log.Infof("Detected %s container runtime for image %s", runtime, flags.image) createHelper, err := nodes.NewCreateHelper(runtime) if err != nil { log.Errorf("Error creating NewCreateHelper for CRI %s! 
%v", flags.image, err) return err } // create all of the node containers, concurrently fns := []func() error{} for _, desiredNode := range desiredNodes { desiredNode := desiredNode // capture loop variable fns = append(fns, func() error { switch desiredNode.Role { case constants.ExternalLoadBalancerNodeRoleValue: return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name) case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue: return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes) default: return nil } }) } log.Info("Creating nodes...") if err := untilError(fns); err != nil { return err } // add an external etcd if explicitly requested if flags.externalEtcd { log.Info("Getting required etcd image...") c, err := status.FromDocker(clusterName) if err != nil { return err } etcdImage, err := c.BootstrapControlPlane().EtcdImage() if err != nil { return err } // attempt to explicitly pull the etcdImage if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(etcdImage, 4) log.Info("Creating external etcd...") if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil { return err } } // wait for all node containers to have a Running status log.Info("Waiting for all nodes to start...") timeout := time.Second * 40 for _, n := range desiredNodes { var lastErr error log.Infof("Waiting for node %s to start...", n.Name) err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) { lines, err := exec.NewHostCmd( "docker", "container", "inspect", "-f", "'{{.State.Running}}'", n.Name, ).RunAndCapture() if err == nil && len(lines) > 0 && lines[0] == `'true'` { return true, nil } lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines) return false, nil }) if err != nil { return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout) } } // get the cluster c, err := status.FromDocker(clusterName) if err != nil { return err } c.Settings = &status.ClusterSettings{ IPFamily: status.IPv4Family, // only IPv4 is tested with kinder } // TODO: the cluster and node settings are currently unused by kinder // Enable these writes if settings have to stored on the nodes // // // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle. 
// // if err := c.WriteSettings(); err != nil { // return err // } // // for _, n := range c.K8sNodes() { // if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil { // return err // } // } return nil } // nodeSpec describes a node to create purely from the container aspect // this does not include e.g. starting kubernetes (see actions for that) type nodeSpec struct { Name string Role string } // nodesToCreate returns the list of nodes to create for the cluster func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec { var desiredNodes []nodeSpec // prepare nodes explicitly for n := 0; n < flags.controlPlanes; n++ { role := constants.ControlPlaneNodeRoleValue desiredNode := nodeSpec{ Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, } desiredNodes = append(desiredNodes, desiredNode) } for n := 0; n < flags.workers; n++ { role := constants.WorkerNodeRoleValue desiredNode := nodeSpec{ Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, } desiredNodes = append(desiredNodes, desiredNode) } // add an external load balancer if explicitly requested or if there are multiple control planes if flags.externalLoadBalancer || flags.controlPlanes > 1 { role := constants.ExternalLoadBalancerNodeRoleValue desiredNodes = append(desiredNodes, nodeSpec{ Name: fmt.Sprintf("%s-lb", clusterName), Role: role, }) } return desiredNodes } // ensureNodeImage ensures that the node image used by the create is present func ensureNodeImage(image string) { fmt.Printf("Ensuring node image (%s) 🖼\n", image) // attempt to explicitly pull the image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(image, 4) } // untilError runs all funcs in separate goroutines, returning the // first non-nil error returned from funcs, or nil if all funcs return nil // NB. this func was originally imported from "sigs.k8s.io/kind/pkg/concurrent"; it is still available // in the kind codebase, but it has been slightly refactored. func untilError(funcs []func() error) error { errCh := make(chan error, len(funcs)) for _, f := range funcs { f := f // capture f go func() { errCh <- f() }() } for i := 0; i < len(funcs); i++ { if err := <-errCh; err != nil { return err } } return nil }
Volumes
identifier_name
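The untilError helper that closes each create.go row fans every function out into its own goroutine and returns the first non-nil error. Because the channel is buffered to len(funcs), the remaining goroutines can still deliver their results after an early return without blocking or leaking. A runnable sketch of the same helper, with a usage example (the failing closure is invented for the demo):

package main

import (
	"errors"
	"fmt"
)

// untilError mirrors the helper above: run all funcs concurrently and
// return the first non-nil error (or nil once every func has finished).
func untilError(funcs []func() error) error {
	errCh := make(chan error, len(funcs)) // buffered so goroutines never block
	for _, f := range funcs {
		f := f // capture loop variable (pre-Go 1.22 semantics)
		go func() { errCh <- f() }()
	}
	for range funcs {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := untilError([]func() error{
		func() error { return nil },
		func() error { return errors.New("node-2 failed to start") },
	})
	fmt.Println(err) // the first failure is reported; stragglers drain into the buffer
}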
create.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package manager import ( "fmt" "strings" "time" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubeadm/kinder/pkg/cluster/status" "k8s.io/kubeadm/kinder/pkg/constants" "k8s.io/kubeadm/kinder/pkg/cri/host" "k8s.io/kubeadm/kinder/pkg/cri/nodes" "k8s.io/kubeadm/kinder/pkg/exec" ) // CreateOptions holds all the options used at create time type CreateOptions struct { controlPlanes int workers int image string externalLoadBalancer bool externalEtcd bool retain bool volumes []string } // CreateOption is a configuration option supplied to Create type CreateOption func(*CreateOptions) // ControlPlanes sets the number of control plane nodes for create func ControlPlanes(controlPlanes int) CreateOption { return func(c *CreateOptions) { c.controlPlanes = controlPlanes } } // Workers sets the number of worker nodes for create func Workers(workers int) CreateOption { return func(c *CreateOptions) { c.workers = workers } } // Image sets the image for create func Image(image string) CreateOption { return func(c *CreateOptions) { c.image = image } } // ExternalEtcd instructs create to add an external etcd to the cluster func ExternalEtcd(externalEtcd bool) CreateOption { return func(c *CreateOptions) { c.externalEtcd = externalEtcd } } // ExternalLoadBalancer instructs create to add an external loadbalancer to the cluster. // NB. 
this happens automatically when there is more than one control plane instance, but with this flag // it is possible to override the default behaviour func ExternalLoadBalancer(externalLoadBalancer bool) CreateOption { return func(c *CreateOptions) { c.externalLoadBalancer = externalLoadBalancer } } // Retain option instructs create cluster to preserve nodes in case of errors for debugging purposes func Retain(retain bool) CreateOption { return func(c *CreateOptions) { c.retain = retain } } // Volumes option instructs create cluster to add volumes to the node containers func Volumes(volumes []string) CreateOption { return func(c *CreateOptions) { c.volumes = volumes } } // CreateCluster creates a new kinder cluster func CreateCluster(clusterName string, options ...CreateOption) error { flags := &CreateOptions{} for _, o := range options { o(flags) } // Check if the cluster name already exists known, err := status.IsKnown(clusterName) if err != nil { return err } if known { return errors.Errorf("a cluster with the name %q already exists", clusterName) } fmt.Printf("Creating cluster %q ...\n", clusterName) // attempt to explicitly pull the required node image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls ensureNodeImage(flags.image) handleErr := func(err error) error { // In case of errors nodes are deleted (except if retain is explicitly set) if !flags.retain { if c, err := status.FromDocker(clusterName); err != nil { log.Error(err) } else { for _, n := range c.AllNodes() { if err := exec.NewHostCmd( "docker", "rm", "-f", // force the container to be deleted now "-v", // delete volumes n.Name(), ).Run(); err != nil { return errors.Wrapf(err, "failed to delete node %s", n.Name()) } } } } log.Error(err) return err } // Create node containers as defined in the kind config if err := createNodes( clusterName, flags, ); err != nil { return handleErr(err) } fmt.Println() fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n") fmt.Printf("kinder do, the kinder swiss knife 🚀!\n") return nil } func createNodes(clusterName string, flags *CreateOptions) error { // compute the desired nodes, and inform the user that we are setting them up desiredNodes := nodesToCreate(clusterName, flags) numberOfNodes := len(desiredNodes) if flags.externalEtcd {
mt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes)) // detect CRI runtime installed into images before actually creating nodes runtime, err := status.InspectCRIinImage(flags.image) if err != nil { log.Errorf("Error detecting CRI for images %s! %v", flags.image, err) return err } log.Infof("Detected %s container runtime for image %s", runtime, flags.image) createHelper, err := nodes.NewCreateHelper(runtime) if err != nil { log.Errorf("Error creating NewCreateHelper for CRI %s! %v", flags.image, err) return err } // create all of the node containers, concurrently fns := []func() error{} for _, desiredNode := range desiredNodes { desiredNode := desiredNode // capture loop variable fns = append(fns, func() error { switch desiredNode.Role { case constants.ExternalLoadBalancerNodeRoleValue: return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name) case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue: return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes) default: return nil } }) } log.Info("Creating nodes...") if err := untilError(fns); err != nil { return err } // add an external etcd if explicitly requested if flags.externalEtcd { log.Info("Getting required etcd image...") c, err := status.FromDocker(clusterName) if err != nil { return err } etcdImage, err := c.BootstrapControlPlane().EtcdImage() if err != nil { return err } // attempt to explicitly pull the etcdImage if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(etcdImage, 4) log.Info("Creating external etcd...") if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil { return err } } // wait for all node containers to have a Running status log.Info("Waiting for all nodes to start...") timeout := time.Second * 40 for _, n := range desiredNodes { var lastErr error log.Infof("Waiting for node %s to start...", n.Name) err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) { lines, err := exec.NewHostCmd( "docker", "container", "inspect", "-f", "'{{.State.Running}}'", n.Name, ).RunAndCapture() if err == nil && len(lines) > 0 && lines[0] == `'true'` { return true, nil } lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines) return false, nil }) if err != nil { return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout) } } // get the cluster c, err := status.FromDocker(clusterName) if err != nil { return err } c.Settings = &status.ClusterSettings{ IPFamily: status.IPv4Family, // only IPv4 is tested with kinder } // TODO: the cluster and node settings are currently unused by kinder // Enable these writes if settings have to stored on the nodes // // // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle. 
// // if err := c.WriteSettings(); err != nil { // return err // } // // for _, n := range c.K8sNodes() { // if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil { // return err // } // } return nil } // nodeSpec describes a node to create purely from the container aspect // this does not include e.g. starting kubernetes (see actions for that) type nodeSpec struct { Name string Role string } // nodesToCreate returns the list of nodes to create for the cluster func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec { var desiredNodes []nodeSpec // prepare nodes explicitly for n := 0; n < flags.controlPlanes; n++ { role := constants.ControlPlaneNodeRoleValue desiredNode := nodeSpec{ Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, } desiredNodes = append(desiredNodes, desiredNode) } for n := 0; n < flags.workers; n++ { role := constants.WorkerNodeRoleValue desiredNode := nodeSpec{ Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1), Role: role, } desiredNodes = append(desiredNodes, desiredNode) } // add an external load balancer if explicitly requested or if there are multiple control planes if flags.externalLoadBalancer || flags.controlPlanes > 1 { role := constants.ExternalLoadBalancerNodeRoleValue desiredNodes = append(desiredNodes, nodeSpec{ Name: fmt.Sprintf("%s-lb", clusterName), Role: role, }) } return desiredNodes } // ensureNodeImage ensures that the node image used by the create is present func ensureNodeImage(image string) { fmt.Printf("Ensuring node image (%s) 🖼\n", image) // attempt to explicitly pull the image if it doesn't exist locally // we don't care if this errors, we'll still try to run which also pulls _, _ = host.PullImage(image, 4) } // untilError runs all funcs in separate goroutines, returning the // first non-nil error returned from funcs, or nil if all funcs return nil // NB. this func was originally imported from "sigs.k8s.io/kind/pkg/concurrent"; it is still available // in the kind codebase, but it has been slightly refactored. func untilError(funcs []func() error) error { errCh := make(chan error, len(funcs)) for _, f := range funcs { f := f // capture f go func() { errCh <- f() }() } for i := 0; i < len(funcs); i++ { if err := <-errCh; err != nil { return err } } return nil }
numberOfNodes++ } f
conditional_block
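createNodes waits for each container by polling docker container inspect through wait.PollImmediate until .State.Running reports 'true' or a 40-second budget runs out, remembering the last failure so the timeout error is informative. Below is a dependency-free sketch of that poll-until-deadline shape; pollUntil and the simulated check are hypothetical stand-ins, not the apimachinery API itself.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil calls check every interval until it reports done or the deadline
// passes, keeping the most recent error so a timeout can explain itself.
func pollUntil(interval, timeout time.Duration, check func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	var lastErr error
	for {
		done, err := check()
		if done {
			return nil
		}
		if err != nil {
			lastErr = err // treat failures as transient and keep polling
		}
		if time.Now().After(deadline) {
			if lastErr != nil {
				return fmt.Errorf("timed out: %w", lastErr)
			}
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	// Simulated "is the container running yet?" check; a real caller would
	// shell out to `docker container inspect -f '{{.State.Running}}'`.
	err := pollUntil(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		return time.Since(start) > 300*time.Millisecond, nil
	})
	fmt.Println("poll result:", err)
}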
command_server.rs
//! Internal server that accepts raw commands, queues them up, and transmits //! them to the Tick Processor asynchronously. Commands are re-transmitted //! if a response isn't received in a timeout period. //! //! Responses from the Tick Processor are sent back over the commands channel //! and are sent to worker processes that register interest in them over channels. //! Workers register interest after sending a command so that they can be notified //! of the successful reception of the command. //! //! TODO: Ensure that commands aren't processed twice by storing Uuids or most //! recent 200 commands or something and checking that list before executing (?) //! //! TODO: Use different channel for responses than for commands extern crate test; use std::collections::VecDeque; use std::thread::{self, Thread}; use std::time::Duration; use std::sync::{Arc, Mutex}; use std::str::FromStr; use futures::{Stream, Canceled}; use futures::sync::mpsc::{unbounded, UnboundedSender, UnboundedReceiver}; use futures::Future; use futures::sync::oneshot::{channel as oneshot, Sender, Receiver}; use uuid::Uuid; use redis; use transport::redis::{get_client, sub_channel}; use transport::commands::*; use conf::CONF; /// A command waiting to be sent plus a Sender to send the Response/Error String /// through and the channel on which to broadcast the Command. struct CommandRequest { cmd: Command, future: Sender<Result<Response, String>>, channel: String, } /// Contains a `CommandRequest` for a worker and a Sender that resolves when the worker /// becomes idle. type WorkerTask = (CommandRequest, Sender<()>); /// Threadsafe queue containing handles to idle command-sender threads in the form of `UnboundedSender`s type UnboundedSenderQueue = Arc<Mutex<VecDeque<UnboundedSender<WorkerTask>>>>; /// Threadsafe queue containing commands waiting to be sent type CommandQueue = Arc<Mutex<VecDeque<CommandRequest>>>; /// A `Vec` containing a `Uuid` of a `Response` that's expected and an `UnboundedSender` to send the /// response through once it arrives type RegisteredList = Vec<(Uuid, UnboundedSender<Result<Response, ()>>)>; /// A message to be sent to the timeout thread containing how long to time out for, /// a oneshot that resolves to a handle to the Timeout's thread as soon as the timeout begins, /// and a oneshot that resolves to `Err(())` if the timeout completes. /// /// The thread handle can be used to end the timeout early to make the timeout thread /// usable again. struct TimeoutRequest { dur: Duration, thread_future: Sender<Thread>, timeout_future: Sender<Result<Response, ()>>, } /// A list of `UnboundedSender`s over which Results from the Tick Processor will be sent if they /// match the ID of the request the command `UnboundedSender` thread sent. 
struct AlertList { // Vec to hold the ids of responses we're waiting for and `Sender`s // to send the result back to the worker thread // Wrapped in Arc<Mutex<>> so that it can be accessed from within futures pub list: RegisteredList, } /// Send out the Response to a worker that has registered interest in its Uuid fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) { let mut al_inner = al.lock().expect("Unable to lock al in send_messages"); let pos_opt: Option<&mut (_, UnboundedSender<Result<Response, ()>>)> = al_inner.list.iter_mut().find(|x| x.0 == res.uuid ); if pos_opt.is_some() { pos_opt.unwrap().1.send( Ok(res.res) ).expect("Unable to send through subscribed future"); } } /// Utility struct for keeping track of the UUIDs of Responses that workers are /// interested in and holding Completes to let them know when they are received impl AlertList { pub fn new() -> AlertList { AlertList { list: Vec::new(), } } /// Register interest in Results with a specified Uuid and send /// the Result over the specified Oneshot when it's received pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) { self.list.push((*response_uuid, c)); } /// Deregisters a listener in the case of a timeout occurring pub fn deregister(&mut self, uuid: &Uuid) { let pos_opt = self.list.iter().position(|x| &x.0 == uuid ); match pos_opt { Some(pos) => { self.list.remove(pos); }, None => println!("Error deregistering element from interest list; it's not in it"), } } } #[derive(Clone)] pub struct CommandServer { al: Arc<Mutex<AlertList>>, command_queue: CommandQueue, // internal command queue conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-UnboundedSender threads client: redis::Client, instance: Instance, // The instance that owns this CommandServer } /// Locks the `CommandQueue` and returns a queued command, if there are any. 
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> { let mut qq_inner = command_queue.lock() .expect("Unable to unlock qq_inner in try_get_new_command"); qq_inner.pop_front() } fn send_command_outer( al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client, mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>, command_queue: CommandQueue, mut attempts: usize, commands_channel: String ) { let wr_cmd = command.wrap(); let _ = send_command(&wr_cmd, client, commands_channel.as_str()); let (sleepy_c, sleepy_o) = oneshot::<Thread>(); let (awake_c, awake_o) = oneshot::<Result<Response, ()>>(); // start the timeout timer on a separate thread let dur = Duration::from_millis(CONF.cs_timeout as u64); let timeout_msg = TimeoutRequest { dur: dur, thread_future: sleepy_c, timeout_future: awake_c }; sleeper_tx.send(timeout_msg).unwrap(); // sleepy_o fulfills immediately to a handle to the sleeper thread let sleepy_handle = sleepy_o.wait(); // UnboundedSender for giving to the AlertList and sending the response back let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>(); // register interest in new Responses coming in with our Command's Uuid { al.lock().expect("Unable to lock al in send_command_outer #1") .register(&wr_cmd.uuid, res_recvd_c); } res_recvd_o.into_future().map(|(item_opt, _)| { item_opt.expect("item_opt was None") }).map_err(|_| Canceled ).select(awake_o).and_then(move |res| { let (status, _) = res; match status { Ok(wrapped_res) => { // command received { // deregister since we're only waiting on one message al.lock().expect("Unable to lock al in send_command_outer #2") .deregister(&wr_cmd.uuid); } // end the timeout now so that we can re-use sleeper thread sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark(); // resolve the Response future res_c.complete(Ok(wrapped_res)); return Ok(sleeper_tx) }, Err(_) => { // timed out { al.lock().expect("Couldn't lock al in Err(_)") .deregister(&wr_cmd.uuid); } attempts += 1; if attempts >= CONF.cs_max_retries { // Let the main thread know it's safe to use the UnboundedSender again // This essentially indicates that the worker thread is idle let err_msg = String::from_str("Timed out too many times!").unwrap(); res_c.complete(Err(err_msg)); return Ok(sleeper_tx) } else { // re-send the command // we can do this recursively since it's only a few retries send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c, command_queue, attempts, commands_channel) } } } Ok(sleeper_tx) }).wait().ok().unwrap(); // block until a response is received or the command times out } /// Manually loop over the converted Stream of commands fn dispatch_worker( work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client, mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue ) -> Option<()> { let (cr, idle_c) = work; // completes initial command and internally iterates until queue is empty send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel); // keep trying to get queued commands to execute until the queue is empty while let Some(cr) = try_get_new_command(command_queue.clone()) { send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel); } idle_c.complete(()); Some(()) } /// Blocks the current thread until a Duration+Complete is received. /// Then it sleeps for that Duration and Completes the oneshot upon awakening. 
/// Returns a Complete upon starting that can be used to end the timeout early fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>,) { for res in rx.wait() { match res.unwrap() { TimeoutRequest{dur, thread_future, timeout_future} => { // send a Complete with a handle to the thread thread_future.complete(thread::current()); thread::park_timeout(dur); timeout_future.complete(Err(())); } } } } /// Creates a command processor that awaits requests fn init_command_processor( cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue, al: &Mutex<AlertList> ) { let mut client = get_client(CONF.redis_host); // channel for communicating with the sleeper thread let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>(); thread::spawn(move || init_sleeper(sleeper_rx) ); for task in cmd_rx.wait() { let res = dispatch_worker( task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone() ); // exit if we're in the process of collapse if res.is_none() { break; } } } impl CommandServer { pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer { let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders); let command_queue = Arc::new(Mutex::new(VecDeque::new())); let al = Arc::new(Mutex::new(AlertList::new())); let al_clone = al.clone(); // Handle newly received Responses let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel); thread::spawn(move || { for raw_res_res in rx.wait() { let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread."); let parsed_res = parse_wrapped_response(raw_res); send_messages(parsed_res, &*al_clone); } }); for _ in 0..CONF.conn_senders { let al_clone = al.clone(); let qq_copy = command_queue.clone(); // channel for getting the UnboundedSender back from the worker thread let (tx, rx) = unbounded::<WorkerTask>(); thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone) ); // store the UnboundedSender which can be used to send queries // to the worker in the connection queue conn_queue.push_back(tx); } let client = get_client(CONF.redis_host); CommandServer { al: al, command_queue: command_queue, conn_queue: Arc::new(Mutex::new(conn_queue)), client: client, instance: Instance{ uuid: instance_uuid, instance_type: String::from(instance_type), }, } } /// Queues up a command to be sent. Returns a future that resolves to /// the returned response. pub fn
( &mut self, command: Command, commands_channel: String ) -> Receiver<Result<Response, String>> { let temp_lock_res = self.conn_queue.lock().unwrap().is_empty(); // Force the guard locking conn_queue to go out of scope // this prevents the lock from being held through the entire if/else let copy_res = temp_lock_res; // future for handing back to the caller that resolves to Response/Error let (res_c, res_o) = oneshot::<Result<Response, String>>(); // future for notifying main thread when command is done and worker is idle let (idle_c, idle_o) = oneshot::<()>(); let cr = CommandRequest { cmd: command, future: res_c, channel: commands_channel, }; if copy_res { self.command_queue.lock().unwrap().push_back(cr); }else{ // type WorkerTask let req = (cr, idle_c); let tx; { tx = self.conn_queue.lock().unwrap().pop_front().unwrap(); tx.send(req).unwrap(); } let cq_clone = self.conn_queue.clone(); thread::spawn(move || { // Wait until the worker thread signals that it is idle let _ = idle_o.wait(); // Put the UnboundedSender for the newly idle worker into the connection queue cq_clone.lock().unwrap().push_back(tx); }); } res_o } pub fn broadcast( &mut self, command: Command, commands_channel: String ) -> Receiver<Vec<Response>> { // spawn a new timeout thread just for this request let (sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>(); let dur = Duration::from_millis(CONF.cs_timeout as u64); let (sleepy_c, _) = oneshot::<Thread>(); // awake_o fulfills when the timeout expires let (awake_c, awake_o) = oneshot::<Result<Response, ()>>(); let wr_cmd = command.wrap(); // Oneshot for sending the collected responses back. let (all_responses_c, all_responses_o) = oneshot::<Vec<Response>>(); let alc = self.al.clone(); let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>(); { // oneshot triggered with matching message received let mut al_inner = alc.lock().expect("Unable to lock al in broadcast"); al_inner.register(&wr_cmd.uuid, res_recvd_c); } let responses_container = Arc::new(Mutex::new(Vec::new())); let responses_container_clone = responses_container.clone(); thread::spawn(move || { for response in res_recvd_o.wait() { match response { Ok(res) => { let mut responses = responses_container_clone.lock().unwrap(); responses.push(res.expect("Inner error in responses iterator")) }, Err(err) => println!("Got error from response iterator: {:?}", err), } } }); let wr_cmd_c = wr_cmd.clone(); thread::spawn(move || { // timer waiter thread // when a timeout happens, poll all the pending interest listeners and send results back let _ = awake_o.wait(); // deregister interest { let mut al_inner = alc.lock().expect("Unable to lock al in broadcast"); al_inner.deregister(&wr_cmd_c.uuid); } let responses; { responses = responses_container.lock().unwrap().clone(); } all_responses_c.complete(responses); }); thread::spawn(move || init_sleeper(sleeper_rx) ); // timer thread // actually send the Command let _ = send_command(&wr_cmd, &self.client, commands_channel.as_str()); let timeout_msg = TimeoutRequest { dur: dur, thread_future: sleepy_c, timeout_future: awake_c }; // initiate timeout sleeper_tx.send(timeout_msg).unwrap(); all_responses_o } /// Sends a command asynchronously without bothering to wait for responses. 
pub fn send_forget(&self, cmd: &Command, channel: &str) { let _ = send_command(&cmd.wrap(), &self.client, channel); } /// Sends a message to the logger with the specified severity pub fn log(&mut self, message_type_opt: Option<&str>, message: &str, level: LogLevel) { let message_type = match message_type_opt { Some(t) => t, None => "General", }; let line = LogMessage { level: level, message_type: String::from(message_type), message: String::from(message), sender: self.instance.clone(), }; self.send_forget(&Command::Log{msg: line}, CONF.redis_log_channel); } /// Shortcut method for logging a debug-level message. pub fn debug(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Debug); } /// Shortcut method for logging a notice-level message. pub fn notice(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Notice); } /// Shortcut method for logging a warning-level message. pub fn warning(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Warning); } /// Shortcut method for logging an error-level message. pub fn error(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Error); } /// Shortcut method for logging a critical-level message. pub fn critical(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Critical); } } #[bench] fn thread_spawn(b: &mut test::Bencher) { b.iter(|| thread::spawn(|| {})) }
execute
identifier_name
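The command_server.rs rows implement request/response correlation: every command carries a Uuid, workers register interest in that Uuid, and a sleeper thread re-sends the command if no matching response arrives before the timeout, giving up after cs_max_retries attempts. Below is a compact Go sketch of the same correlate-by-id, retry-on-timeout loop; the types and names are invented for illustration, not a port of the Rust code.

package main

import (
	"fmt"
	"time"
)

// response is tagged with the UUID of the command it answers,
// echoing the CommandServer's correlation scheme.
type response struct {
	uuid string
	body string
}

// sendWithRetry sends the command, waits for a response whose uuid matches,
// and re-sends on timeout, giving up after maxRetries attempts.
func sendWithRetry(cmdUUID string, send func(), responses <-chan response,
	timeout time.Duration, maxRetries int) (response, error) {
	for attempt := 0; attempt < maxRetries; attempt++ {
		send()
		timer := time.NewTimer(timeout)
	waiting:
		for {
			select {
			case res := <-responses:
				if res.uuid == cmdUUID { // correlate by UUID
					timer.Stop()
					return res, nil
				}
				// response for a different command; keep waiting
			case <-timer.C:
				break waiting // timed out; loop around and re-send
			}
		}
	}
	return response{}, fmt.Errorf("no response for %s after %d attempts", cmdUUID, maxRetries)
}

func main() {
	responses := make(chan response, 1)
	send := func() { responses <- response{uuid: "abc", body: "ok"} }
	res, err := sendWithRetry("abc", send, responses, time.Second, 3)
	fmt.Println(res.body, err)
}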
command_server.rs
//! Internal server that accepts raw commands, queues them up, and transmits //! them to the Tick Processor asynchronously. Commands are re-transmitted //! if a response isn't received in a timeout period. //! //! Responses from the Tick Processor are sent back over the commands channel //! and are sent to worker processes that register interest in them over channels. //! Workers register interest after sending a command so that they can be notified //! of the successful reception of the command. //! //! TODO: Ensure that commands aren't processed twice by storing Uuids or most //! recent 200 commands or something and checking that list before executing (?) //! //! TODO: Use different channel for responses than for commands extern crate test; use std::collections::VecDeque; use std::thread::{self, Thread}; use std::time::Duration; use std::sync::{Arc, Mutex}; use std::str::FromStr; use futures::{Stream, Canceled}; use futures::sync::mpsc::{unbounded, UnboundedSender, UnboundedReceiver}; use futures::Future; use futures::sync::oneshot::{channel as oneshot, Sender, Receiver}; use uuid::Uuid; use redis; use transport::redis::{get_client, sub_channel}; use transport::commands::*; use conf::CONF; /// A command waiting to be sent plus a Sender to send the Response/Error String /// through and the channel on which to broadcast the Command. struct CommandRequest { cmd: Command, future: Sender<Result<Response, String>>, channel: String, } /// Contains a `CommandRequest` for a worker and a Sender that resolves when the worker /// becomes idle. type WorkerTask = (CommandRequest, Sender<()>); /// Threadsafe queue containing handles to idle command-sender threads in the form of `UnboundedSender`s type UnboundedSenderQueue = Arc<Mutex<VecDeque<UnboundedSender<WorkerTask>>>>; /// Threadsafe queue containing commands waiting to be sent type CommandQueue = Arc<Mutex<VecDeque<CommandRequest>>>; /// A `Vec` containing a `Uuid` of a `Response` that's expected and an `UnboundedSender` to send the /// response through once it arrives type RegisteredList = Vec<(Uuid, UnboundedSender<Result<Response, ()>>)>; /// A message to be sent to the timeout thread containing how long to time out for, /// a oneshot that resolves to a handle to the Timeout's thread as soon as the timeout begins, /// and a oneshot that resolves to `Err(())` if the timeout completes. /// /// The thread handle can be used to end the timeout early to make the timeout thread /// usable again. struct TimeoutRequest { dur: Duration, thread_future: Sender<Thread>, timeout_future: Sender<Result<Response, ()>>, } /// A list of `UnboundedSender`s over which Results from the Tick Processor will be sent if they /// match the ID of the request the command `UnboundedSender` thread sent. 
struct AlertList { // Vec to hold the ids of responses we're waiting for and `Sender`s // to send the result back to the worker thread // Wrapped in Arc<Mutex<>> so that it can be accessed from within futures pub list: RegisteredList, } /// Send out the Response to a worker that has registered interest in its Uuid fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) { let mut al_inner = al.lock().expect("Unable to lock al in send_messages"); let pos_opt: Option<&mut (_, UnboundedSender<Result<Response, ()>>)> = al_inner.list.iter_mut().find(|x| x.0 == res.uuid ); if pos_opt.is_some() { pos_opt.unwrap().1.send( Ok(res.res) ).expect("Unable to send through subscribed future"); } } /// Utility struct for keeping track of the UUIDs of Responses that workers are /// interested in and holding Completes to let them know when they are received impl AlertList { pub fn new() -> AlertList { AlertList { list: Vec::new(), } } /// Register interest in Results with a specified Uuid and send /// the Result over the specified Oneshot when it's received pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) { self.list.push((*response_uuid, c)); } /// Deregisters a listener in the case of a timeout occurring pub fn deregister(&mut self, uuid: &Uuid) { let pos_opt = self.list.iter().position(|x| &x.0 == uuid ); match pos_opt { Some(pos) => { self.list.remove(pos); }, None => println!("Error deregistering element from interest list; it's not in it"), } } } #[derive(Clone)] pub struct CommandServer { al: Arc<Mutex<AlertList>>, command_queue: CommandQueue, // internal command queue conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-UnboundedSender threads client: redis::Client, instance: Instance, // The instance that owns this CommandServer } /// Locks the `CommandQueue` and returns a queued command, if there are any. 
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> { let mut qq_inner = command_queue.lock() .expect("Unable to unlock qq_inner in try_get_new_command"); qq_inner.pop_front() } fn send_command_outer( al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client, mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>, command_queue: CommandQueue, mut attempts: usize, commands_channel: String ) { let wr_cmd = command.wrap(); let _ = send_command(&wr_cmd, client, commands_channel.as_str()); let (sleepy_c, sleepy_o) = oneshot::<Thread>(); let (awake_c, awake_o) = oneshot::<Result<Response, ()>>(); // start the timeout timer on a separate thread let dur = Duration::from_millis(CONF.cs_timeout as u64); let timeout_msg = TimeoutRequest { dur: dur, thread_future: sleepy_c, timeout_future: awake_c }; sleeper_tx.send(timeout_msg).unwrap(); // sleepy_o fulfills immediately to a handle to the sleeper thread let sleepy_handle = sleepy_o.wait(); // UnboundedSender for giving to the AlertList and sending the response back let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>(); // register interest in new Responses coming in with our Command's Uuid { al.lock().expect("Unable to lock al in send_command_outer #1") .register(&wr_cmd.uuid, res_recvd_c); } res_recvd_o.into_future().map(|(item_opt, _)| { item_opt.expect("item_opt was None") }).map_err(|_| Canceled ).select(awake_o).and_then(move |res| { let (status, _) = res; match status { Ok(wrapped_res) => { // command received { // deregister since we're only waiting on one message al.lock().expect("Unable to lock al in send_command_outer #2") .deregister(&wr_cmd.uuid); } // end the timeout now so that we can re-use sleeper thread sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark(); // resolve the Response future res_c.complete(Ok(wrapped_res)); return Ok(sleeper_tx) }, Err(_) => { // timed out { al.lock().expect("Couldn't lock al in Err(_)") .deregister(&wr_cmd.uuid); } attempts += 1; if attempts >= CONF.cs_max_retries { // Let the main thread know it's safe to use the UnboundedSender again // This essentially indicates that the worker thread is idle let err_msg = String::from_str("Timed out too many times!").unwrap(); res_c.complete(Err(err_msg)); return Ok(sleeper_tx) } else { // re-send the command // we can do this recursively since it's only a few retries send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c, command_queue, attempts, commands_channel) } } } Ok(sleeper_tx) }).wait().ok().unwrap(); // block until a response is received or the command times out } /// Manually loop over the converted Stream of commands fn dispatch_worker( work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client, mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue ) -> Option<()> { let (cr, idle_c) = work; // completes initial command and internally iterates until queue is empty send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel); // keep trying to get queued commands to execute until the queue is empty while let Some(cr) = try_get_new_command(command_queue.clone()) { send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel); } idle_c.complete(()); Some(()) } /// Blocks the current thread until a Duration+Complete is received. /// Then it sleeps for that Duration and Completes the oneshot upon awakening. 
/// Returns a Complete upon starting that can be used to end the timeout early fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>,) { for res in rx.wait() { match res.unwrap() { TimeoutRequest{dur, thread_future, timeout_future} => { // send a Complete with a handle to the thread thread_future.complete(thread::current()); thread::park_timeout(dur); timeout_future.complete(Err(())); } } } } /// Creates a command processor that awaits requests fn init_command_processor( cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue, al: &Mutex<AlertList> ) { let mut client = get_client(CONF.redis_host); // channel for communicating with the sleeper thread let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>(); thread::spawn(move || init_sleeper(sleeper_rx) );
for task in cmd_rx.wait() { let res = dispatch_worker( task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone() ); // exit if we're in the process of collapse if res.is_none() { break; } } } impl CommandServer { pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer { let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders); let command_queue = Arc::new(Mutex::new(VecDeque::new())); let al = Arc::new(Mutex::new(AlertList::new())); let al_clone = al.clone(); // Handle newly received Responses let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel); thread::spawn(move || { for raw_res_res in rx.wait() { let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread."); let parsed_res = parse_wrapped_response(raw_res); send_messages(parsed_res, &*al_clone); } }); for _ in 0..CONF.conn_senders { let al_clone = al.clone(); let qq_copy = command_queue.clone(); // channel for getting the UnboundedSender back from the worker thread let (tx, rx) = unbounded::<WorkerTask>(); thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone) ); // store the UnboundedSender which can be used to send queries // to the worker in the connection queue conn_queue.push_back(tx); } let client = get_client(CONF.redis_host); CommandServer { al: al, command_queue: command_queue, conn_queue: Arc::new(Mutex::new(conn_queue)), client: client, instance: Instance{ uuid: instance_uuid, instance_type: String::from(instance_type), }, } } /// Queues up a command to be sent. Returns a future that resolves to /// the returned response. pub fn execute( &mut self, command: Command, commands_channel: String ) -> Receiver<Result<Response, String>> { let temp_lock_res = self.conn_queue.lock().unwrap().is_empty(); // Force the guard locking conn_queue to go out of scope // this prevents the lock from being held through the entire if/else let copy_res = temp_lock_res; // future for handing back to the caller that resolves to Response/Error let (res_c, res_o) = oneshot::<Result<Response, String>>(); // future for notifying main thread when command is done and worker is idle let (idle_c, idle_o) = oneshot::<()>(); let cr = CommandRequest { cmd: command, future: res_c, channel: commands_channel, }; if copy_res { self.command_queue.lock().unwrap().push_back(cr); }else{ // type WorkerTask let req = (cr, idle_c); let tx; { tx = self.conn_queue.lock().unwrap().pop_front().unwrap(); tx.send(req).unwrap(); } let cq_clone = self.conn_queue.clone(); thread::spawn(move || { // Wait until the worker thread signals that it is idle let _ = idle_o.wait(); // Put the UnboundedSender for the newly idle worker into the connection queue cq_clone.lock().unwrap().push_back(tx); }); } res_o } pub fn broadcast( &mut self, command: Command, commands_channel: String ) -> Receiver<Vec<Response>> { // spawn a new timeout thread just for this request let (sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>(); let dur = Duration::from_millis(CONF.cs_timeout as u64); let (sleepy_c, _) = oneshot::<Thread>(); // awake_o fulfills when the timeout expires let (awake_c, awake_o) = oneshot::<Result<Response, ()>>(); let wr_cmd = command.wrap(); // Oneshot for sending the collected responses back. 
let (all_responses_c, all_responses_o) = oneshot::<Vec<Response>>(); let alc = self.al.clone(); let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>(); { // oneshot triggered with matching message received let mut al_inner = alc.lock().expect("Unable to lock al in broadcast"); al_inner.register(&wr_cmd.uuid, res_recvd_c); } let responses_container = Arc::new(Mutex::new(Vec::new())); let responses_container_clone = responses_container.clone(); thread::spawn(move || { for response in res_recvd_o.wait() { match response { Ok(res) => { let mut responses = responses_container_clone.lock().unwrap(); responses.push(res.expect("Inner error in responses iterator")) }, Err(err) => println!("Got error from response iterator: {:?}", err), } } }); let wr_cmd_c = wr_cmd.clone(); thread::spawn(move || { // timer waiter thread // when a timeout happens, poll all the pending interest listeners and send results back let _ = awake_o.wait(); // deregister interest { let mut al_inner = alc.lock().expect("Unable to lock al in broadcast"); al_inner.deregister(&wr_cmd_c.uuid); } let responses; { responses = responses_container.lock().unwrap().clone(); } all_responses_c.complete(responses); }); thread::spawn(move || init_sleeper(sleeper_rx) ); // timer thread // actually send the Command let _ = send_command(&wr_cmd, &self.client, commands_channel.as_str()); let timeout_msg = TimeoutRequest { dur: dur, thread_future: sleepy_c, timeout_future: awake_c }; // initiate timeout sleeper_tx.send(timeout_msg).unwrap(); all_responses_o } /// Sends a command asynchronously without bothering to wait for responses. pub fn send_forget(&self, cmd: &Command, channel: &str) { let _ = send_command(&cmd.wrap(), &self.client, channel); } /// Sends a message to the logger with the specified severity pub fn log(&mut self, message_type_opt: Option<&str>, message: &str, level: LogLevel) { let message_type = match message_type_opt { Some(t) => t, None => "General", }; let line = LogMessage { level: level, message_type: String::from(message_type), message: String::from(message), sender: self.instance.clone(), }; self.send_forget(&Command::Log{msg: line}, CONF.redis_log_channel); } /// Shortcut method for logging a debug-level message. pub fn debug(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Debug); } /// Shortcut method for logging a notice-level message. pub fn notice(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Notice); } /// Shortcut method for logging a warning-level message. pub fn warning(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Warning); } /// Shortcut method for logging an error-level message. pub fn error(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Error); } /// Shortcut method for logging a critical-level message. pub fn critical(&mut self, message_type: Option<&str>, message: &str) { self.log(message_type, message, LogLevel::Critical); } } #[bench] fn thread_spawn(b: &mut test::Bencher) { b.iter(|| thread::spawn(|| {})) }
random_line_split
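CommandServer::execute above hands the request to an idle worker when one is available and pushes it onto a shared backlog otherwise; a worker drains the backlog before returning itself to the idle queue. Here is a small Go sketch of that idle-queue/backlog handoff, with hypothetical names and buffered channels so a handoff to an idle worker never blocks:

package main

import (
	"fmt"
	"sync"
)

// pool hands tasks to idle workers and queues them when every worker is
// busy, loosely mirroring CommandServer::execute. Illustrative only.
type pool struct {
	mu      sync.Mutex
	idle    []chan string // each idle worker waits on its own channel
	backlog []string      // tasks queued while all workers are busy
}

func (p *pool) submit(task string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.idle) == 0 {
		p.backlog = append(p.backlog, task) // no idle worker: queue it
		return
	}
	w := p.idle[0]
	p.idle = p.idle[1:]
	w <- task // buffer is empty for an idle worker, so this never blocks
}

// markIdle returns a worker's channel to the idle queue, draining one
// backlog task first if any are waiting (like the worker loop above).
func (p *pool) markIdle(w chan string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.backlog) > 0 {
		task := p.backlog[0]
		p.backlog = p.backlog[1:]
		w <- task
		return
	}
	p.idle = append(p.idle, w)
}

func worker(p *pool, w chan string, wg *sync.WaitGroup) {
	for task := range w {
		fmt.Println("handling", task)
		p.markIdle(w) // re-enqueue (or pick up a backlog task) before signalling
		wg.Done()
	}
}

func main() {
	p := &pool{}
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		w := make(chan string, 1) // buffered so handoff never blocks
		p.idle = append(p.idle, w)
		go worker(p, w, &wg)
	}
	for _, t := range []string{"a", "b", "c", "d"} {
		wg.Add(1)
		p.submit(t)
	}
	wg.Wait()
}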
command_server.rs
//! Internal server that accepts raw commands, queues them up, and transmits //! them to the Tick Processor asynchronously. Commands are re-transmitted //! if a response isn't received in a timeout period. //! //! Responses from the Tick Processor are sent back over the commands channel //! and are sent to worker processes that register interest in them over channels. //! Workers register interest after sending a command so that they can be notified //! of the successful reception of the command. //! //! TODO: Ensure that commands aren't processed twice by storing Uuids or most //! recent 200 commands or something and checking that list before executing (?) //! //! TODO: Use different channel for responses than for commands extern crate test; use std::collections::VecDeque; use std::thread::{self, Thread}; use std::time::Duration; use std::sync::{Arc, Mutex}; use std::str::FromStr; use futures::{Stream, Canceled}; use futures::sync::mpsc::{unbounded, UnboundedSender, UnboundedReceiver}; use futures::Future; use futures::sync::oneshot::{channel as oneshot, Sender, Receiver}; use uuid::Uuid; use redis; use transport::redis::{get_client, sub_channel}; use transport::commands::*; use conf::CONF; /// A command waiting to be sent plus a Sender to send the Response/Error String /// through and the channel on which to broadcast the Command. struct CommandRequest { cmd: Command, future: Sender<Result<Response, String>>, channel: String, } /// Contains a `CommandRequest` for a worker and a Sender that resolves when the worker /// becomes idle. type WorkerTask = (CommandRequest, Sender<()>); /// Threadsafe queue containing handles to idle command-sender threads in the form of `UnboundedSender`s type UnboundedSenderQueue = Arc<Mutex<VecDeque<UnboundedSender<WorkerTask>>>>; /// Threadsafe queue containing commands waiting to be sent type CommandQueue = Arc<Mutex<VecDeque<CommandRequest>>>; /// A `Vec` containing a `Uuid` of a `Response` that's expected and an `UnboundedSender` to send the /// response through once it arrives type RegisteredList = Vec<(Uuid, UnboundedSender<Result<Response, ()>>)>; /// A message to be sent to the timeout thread containing how long to time out for, /// a oneshot that resolves to a handle to the Timeout's thread as soon as the timeout begins, /// and a oneshot that resolves to `Err(())` if the timeout completes. /// /// The thread handle can be used to end the timeout early to make the timeout thread /// usable again. struct TimeoutRequest { dur: Duration, thread_future: Sender<Thread>, timeout_future: Sender<Result<Response, ()>>, } /// A list of `UnboundedSender`s over which Results from the Tick Processor will be sent if they /// match the ID of the request the command `UnboundedSender` thread sent. 
struct AlertList { // Vec to hold the ids of responses we're waiting for and `Sender`s // to send the result back to the worker thread // Wrapped in Arc<Mutex<>> so that it can be accessed from within futures pub list: RegisteredList, } /// Send out the Response to a worker that has registered interest in its Uuid fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) { let mut al_inner = al.lock().expect("Unable to lock al in send_messages"); let pos_opt: Option<&mut (_, UnboundedSender<Result<Response, ()>>)> = al_inner.list.iter_mut().find(|x| x.0 == res.uuid ); if pos_opt.is_some() { pos_opt.unwrap().1.send( Ok(res.res) ).expect("Unable to send through subscribed future"); } } /// Utility struct for keeping track of the UUIDs of Responses that workers are /// interested in and holding Completes to let them know when they are received impl AlertList { pub fn new() -> AlertList { AlertList { list: Vec::new(), } } /// Register interest in Results with a specified Uuid and send /// the Result over the specified Oneshot when it's received pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) { self.list.push((*response_uuid, c)); } /// Deregisters a listener in the case of a timeout occurring pub fn deregister(&mut self, uuid: &Uuid) { let pos_opt = self.list.iter().position(|x| &x.0 == uuid ); match pos_opt { Some(pos) => { self.list.remove(pos); }, None => println!("Error deregistering element from interest list; it's not in it"), } } } #[derive(Clone)] pub struct CommandServer { al: Arc<Mutex<AlertList>>, command_queue: CommandQueue, // internal command queue conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-UnboundedSender threads client: redis::Client, instance: Instance, // The instance that owns this CommandServer } /// Locks the `CommandQueue` and returns a queued command, if there are any. 
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> {
    let mut qq_inner = command_queue.lock()
        .expect("Unable to lock qq_inner in try_get_new_command");
    qq_inner.pop_front()
}

fn send_command_outer(
    al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client,
    mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>,
    command_queue: CommandQueue, mut attempts: usize, commands_channel: String
) {
    let wr_cmd = command.wrap();
    let _ = send_command(&wr_cmd, client, commands_channel.as_str());

    let (sleepy_c, sleepy_o) = oneshot::<Thread>();
    let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
    // start the timeout timer on a separate thread
    let dur = Duration::from_millis(CONF.cs_timeout as u64);
    let timeout_msg = TimeoutRequest {
        dur: dur,
        thread_future: sleepy_c,
        timeout_future: awake_c
    };

    sleeper_tx.send(timeout_msg).unwrap();
    // sleepy_o resolves immediately with a handle to the sleeper thread
    let sleepy_handle = sleepy_o.wait();
    // UnboundedSender for giving to the AlertList and sending the response back
    let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
    // register interest in new Responses coming in with our Command's Uuid
    {
        al.lock().expect("Unable to lock al in send_command_outer #1")
            .register(&wr_cmd.uuid, res_recvd_c);
    }
    res_recvd_o.into_future().map(|(item_opt, _)| {
        item_opt.expect("item_opt was None")
    }).map_err(|_| Canceled ).select(awake_o).and_then(move |res| {
        let (status, _) = res;
        match status {
            Ok(wrapped_res) => { // command received
                {
                    // deregister since we're only waiting on one message
                    al.lock().expect("Unable to lock al in send_command_outer #2")
                        .deregister(&wr_cmd.uuid);
                }
                // end the timeout now so that we can re-use the sleeper thread
                sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark();
                // resolve the Response future
                res_c.complete(Ok(wrapped_res));
                return Ok(sleeper_tx)
            },
            Err(_) =>
        }
        Ok(sleeper_tx)
    }).wait().ok().unwrap(); // block until a response is received or the command times out
}

/// Manually loop over the converted Stream of commands
fn dispatch_worker(
    work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client,
    mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue
) -> Option<()> {
    let (cr, idle_c) = work;

    // completes the initial command and internally iterates until the queue is empty
    send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
    // keep trying to get queued commands to execute until the queue is empty
    while let Some(cr) = try_get_new_command(command_queue.clone()) {
        send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
    }
    idle_c.complete(());

    Some(())
}

/// Blocks the current thread until a Duration+Complete is received.
/// Then it sleeps for that Duration and Completes the oneshot upon awakening.
/// Sends back a handle to its thread upon starting so that the timeout can be ended early.
fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>) {
    for res in rx.wait() {
        match res.unwrap() {
            TimeoutRequest{dur, thread_future, timeout_future} => {
                // send a Complete with a handle to the thread
                thread_future.complete(thread::current());
                thread::park_timeout(dur);
                timeout_future.complete(Err(()));
            }
        }
    }
}

/// Creates a command processor that awaits requests
fn init_command_processor(
    cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue,
    al: &Mutex<AlertList>
) {
    let mut client = get_client(CONF.redis_host);
    // channel for communicating with the sleeper thread
    let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
    thread::spawn(move || init_sleeper(sleeper_rx) );

    for task in cmd_rx.wait() {
        let res = dispatch_worker(
            task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone()
        );

        // exit if we're in the process of collapse
        if res.is_none() {
            break;
        }
    }
}

impl CommandServer {
    pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer {
        let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders);
        let command_queue = Arc::new(Mutex::new(VecDeque::new()));

        let al = Arc::new(Mutex::new(AlertList::new()));
        let al_clone = al.clone();

        // Handle newly received Responses
        let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel);
        thread::spawn(move || {
            for raw_res_res in rx.wait() {
                let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread.");
                let parsed_res = parse_wrapped_response(raw_res);
                send_messages(parsed_res, &*al_clone);
            }
        });

        for _ in 0..CONF.conn_senders {
            let al_clone = al.clone();
            let qq_copy = command_queue.clone();

            // channel for sending `WorkerTask`s to the worker thread
            let (tx, rx) = unbounded::<WorkerTask>();
            thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone) );
            // store the UnboundedSender which can be used to send queries
            // to the worker in the connection queue
            conn_queue.push_back(tx);
        }

        let client = get_client(CONF.redis_host);
        CommandServer {
            al: al,
            command_queue: command_queue,
            conn_queue: Arc::new(Mutex::new(conn_queue)),
            client: client,
            instance: Instance{
                uuid: instance_uuid,
                instance_type: String::from(instance_type),
            },
        }
    }

    /// Queues up a command to be sent. Returns a future that resolves to
    /// the returned response.
    pub fn execute(
        &mut self, command: Command, commands_channel: String
    ) -> Receiver<Result<Response, String>> {
        let temp_lock_res = self.conn_queue.lock().unwrap().is_empty();
        // Force the guard locking conn_queue to go out of scope;
        // this prevents the lock from being held through the entire if/else
        let copy_res = temp_lock_res;
        // future for handing back to the caller that resolves to Response/Error
        let (res_c, res_o) = oneshot::<Result<Response, String>>();
        // future for notifying the main thread when the command is done and the worker is idle
        let (idle_c, idle_o) = oneshot::<()>();
        let cr = CommandRequest {
            cmd: command,
            future: res_c,
            channel: commands_channel,
        };

        if copy_res {
            self.command_queue.lock().unwrap().push_back(cr);
        } else {
            // type WorkerTask
            let req = (cr, idle_c);
            let tx;
            {
                tx = self.conn_queue.lock().unwrap().pop_front().unwrap();
                tx.send(req).unwrap();
            }
            let cq_clone = self.conn_queue.clone();
            thread::spawn(move || {
                // Wait until the worker thread signals that it is idle
                let _ = idle_o.wait();
                // Put the UnboundedSender for the newly idle worker back into the connection queue
                cq_clone.lock().unwrap().push_back(tx);
            });
        }

        res_o
    }

    pub fn broadcast(
        &mut self, command: Command, commands_channel: String
    ) -> Receiver<Vec<Response>> {
        // spawn a new timeout thread just for this request
        let (sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
        let dur = Duration::from_millis(CONF.cs_timeout as u64);
        let (sleepy_c, _) = oneshot::<Thread>();
        // awake_o fulfills when the timeout expires
        let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
        let wr_cmd = command.wrap();
        // Oneshot for sending the collected responses back
        let (all_responses_c, all_responses_o) = oneshot::<Vec<Response>>();

        let alc = self.al.clone();
        let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
        {
            // register interest so we're notified whenever a matching message is received
            let mut al_inner = alc.lock().expect("Unable to lock al in broadcast");
            al_inner.register(&wr_cmd.uuid, res_recvd_c);
        }

        let responses_container = Arc::new(Mutex::new(Vec::new()));
        let responses_container_clone = responses_container.clone();
        thread::spawn(move || {
            for response in res_recvd_o.wait() {
                match response {
                    Ok(res) => {
                        let mut responses = responses_container_clone.lock().unwrap();
                        responses.push(res.expect("Inner error in responses iterator"))
                    },
                    Err(err) => println!("Got error from response iterator: {:?}", err),
                }
            }
        });

        let wr_cmd_c = wr_cmd.clone();
        thread::spawn(move || { // timer waiter thread
            // when the timeout fires, stop listening and send the collected results back
            let _ = awake_o.wait();
            // deregister interest
            {
                let mut al_inner = alc.lock().expect("Unable to lock al in broadcast");
                al_inner.deregister(&wr_cmd_c.uuid);
            }
            let responses;
            {
                responses = responses_container.lock().unwrap().clone();
            }
            all_responses_c.complete(responses);
        });

        thread::spawn(move || init_sleeper(sleeper_rx) ); // timer thread

        // actually send the Command
        let _ = send_command(&wr_cmd, &self.client, commands_channel.as_str());
        let timeout_msg = TimeoutRequest {
            dur: dur,
            thread_future: sleepy_c,
            timeout_future: awake_c
        };
        // initiate timeout
        sleeper_tx.send(timeout_msg).unwrap();

        all_responses_o
    }

    /// Sends a command asynchronously without bothering to wait for responses.
    pub fn send_forget(&self, cmd: &Command, channel: &str) {
        let _ = send_command(&cmd.wrap(), &self.client, channel);
    }

    /// Sends a message to the logger with the specified severity
    pub fn log(&mut self, message_type_opt: Option<&str>, message: &str, level: LogLevel) {
        let message_type = match message_type_opt {
            Some(t) => t,
            None => "General",
        };
        let line = LogMessage {
            level: level,
            message_type: String::from(message_type),
            message: String::from(message),
            sender: self.instance.clone(),
        };
        self.send_forget(&Command::Log{msg: line}, CONF.redis_log_channel);
    }

    /// Shortcut method for logging a debug-level message.
    pub fn debug(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Debug);
    }

    /// Shortcut method for logging a notice-level message.
    pub fn notice(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Notice);
    }

    /// Shortcut method for logging a warning-level message.
    pub fn warning(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Warning);
    }

    /// Shortcut method for logging an error-level message.
    pub fn error(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Error);
    }

    /// Shortcut method for logging a critical-level message.
    pub fn critical(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Critical);
    }
}

#[bench]
fn thread_spawn(b: &mut test::Bencher) {
    b.iter(|| thread::spawn(|| {}))
}
{ // timed out { al.lock().expect("Couldn't lock al in Err(_)") .deregister(&wr_cmd.uuid); } attempts += 1; if attempts >= CONF.cs_max_retries { // Let the main thread know it's safe to use the UnboundedSender again // This essentially indicates that the worker thread is idle let err_msg = String::from_str("Timed out too many times!").unwrap(); res_c.complete(Err(err_msg)); return Ok(sleeper_tx) } else { // re-send the command // we can do this recursively since it's only a few retries send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c, command_queue, attempts, commands_channel) } }
conditional_block
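The Rust record above implements a retransmit-until-acknowledged protocol: send the wrapped command, race the response against a timeout, and retry a bounded number of times. As a rough illustration only, here is a minimal JavaScript sketch of the same idea; `transport.send`, `responses.once`, and the defaults are hypothetical stand-ins, not APIs from this codebase:

async function executeWithRetry(transport, responses, wrappedCmd, opts) {
    var timeoutMs = (opts && opts.timeoutMs) || 1000;   // assumed default, analogous to CONF.cs_timeout
    var maxRetries = (opts && opts.maxRetries) || 3;    // assumed default, analogous to CONF.cs_max_retries
    for (var attempt = 0; attempt < maxRetries; attempt++) {
        transport.send(wrappedCmd);                     // (re)transmit the command
        var res = await Promise.race([
            responses.once(wrappedCmd.uuid),            // resolves when a response with our uuid arrives
            new Promise(function (resolve) { setTimeout(resolve, timeoutMs, null); })
        ]);
        if (res !== null) { return res; }               // acknowledged before the timeout fired
    }
    throw new Error('Timed out too many times!');
}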
Workload.js
/**
 * Created by songchao on 16/6/25.
 */

/**
 * Entry point: renders the input controls and draws the workload chart
 * @param packageID
 */
function showWorkload(packageID) {
    if (packageID != undefined) {
        Workload.id = packageID;
        Workload.whichRequest = 1;
    }
    $(canvas).attr({width: $(canvas).width() + "px", height: $(canvas).height() + "px"});
    setTimeout(function () {
        ReactDOM.render(React.createElement(WorkloadInputComponent, null), document.getElementById("workload_input_container"))
    }, 1000);
    draw(canvas);
}

// ----- grab the relevant DOM elements
var canvas = document.getElementById("workload_canvas");
var toast = document.getElementById("workload_toast");

// ----- data and configuration store
var Workload = {
    Data: [], // chart data
    type: 0, // chart type: 0 = year, 1 = month, 2 = day, 3 = custom date range
    year: undefined, // selected year
    month: undefined, // selected month
    day: undefined, // currently selected day
    days: undefined, // number of days to request from the server
    rectNum: 0, // number of bars to draw on the chart
    mouseEvent: [], // mouse-event hit areas are registered here
    id: User.id, // id used in requests; defaults to the current user's id
    whichRequest: 0, // whose workload: 0 = personal (default), 1 = outlet
    width: function () {
        return $(canvas).width();
    },
    height: function () {
        return $(canvas).height();
    },
    offsetLeft: 40,
    offsetTop: 40,
    offsetRight: 40,
    offsetBottom: 40,
    arrowLength: 5,
    limitY: 0,
    unitLengthX: undefined,
    unitLengthY: function () {
        return (this.getYLength() - 40) / this.limitY;
    },
    getRectMaxH: function () {
        return this.getYLength() - 40;
    },
    getX: function (x) {
        return x + this.offsetLeft;
    },
    getY: function (y) {
        return this.height() - this.offsetBottom - y;
    },
    getXLength: function () {
        return this.width() - this.offsetLeft - this.offsetRight;
    },
    getYLength: function () {
        return this.height() - this.offsetTop - this.offsetBottom;
    },
    drawLine: function (context, x1, y1, x2, y2) {
        context.moveTo(x1, y1);
        context.lineTo(x2, y2);
    },
    setXLimit: function () {
        //this.rectNum = this.Data.length;
        switch (this.type) {
            case 0:
                Workload.unitLengthX = (Workload.getXLength() - 40) / 24;
                break;
            case 1:
                Workload.unitLengthX = (Workload.getXLength() - 40) / 62;
                break;
            case 2:
                Workload.unitLengthX = (Workload.getXLength() - 40) / 48;
                break;
            case 3:
                if (this.rectNum != undefined) {
                    Workload.unitLengthX = (Workload.getXLength() - 40) / (2 * this.rectNum);
                }
                break;
        }
    },
    init: function () {
        this.mouseEvent.length = 0;
        this.setXLimit();
        this.type = parseInt(this.type);
        this.year = parseInt(this.year);
        this.month = parseInt(this.month);
        this.day = parseInt(this.day);
        this.days = parseInt(this.days);
        this.rectNum = parseInt(this.rectNum);
        var dd = this.Data;
        this.limitY = Math.max.apply(null, dd);
    }
};

/**
 * Workload input container
 */
var WorkloadInputComponent = React.createClass({displayName: "WorkloadInputComponent",
    getInitialState: function () {
        return {
            year: "2010",
            month: "1",
            day: "1",
            toDay: "",
        }
    },
    yearChange: function (e) {
        var value = e.target.value;
        this.setState({year: value});
        loadData(0, value);
    },
    monthChange: function (e) {
        var value = e.target.value;
        this.setState({month: value});
        loadData(1, this.state.year, value);
    },
    dayChange: function (e) {
        var value = e.target.value;
        this.setState({day: value});
        loadData(2, this.state.year, this.state.month, value);
    },
    inputBlur: function (e) {
        var value = e.target.value;
        if (value != "") {
            this.setState({toDay: value});
            loadData(3, this.state.year, this.state.month, this.state.day, value);
        }
    },
    render: function () {
        var month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        var year = [2010, 2011, 2012, 2013, 2014, 2015, 2016];
        var day = [];
        for (var i = 1; i <= 30; i++) {
            day.push(i);
        }
        return (
            React.createElement("div", null,
                React.createElement("select", {onChange: this.yearChange, className: "form-control"},
                    year.map(function (data, index) {
                        return React.createElement("option", {key: "option"+index, value: data}, data)
                    })
                ),
                React.createElement("select", {onChange: this.monthChange, className: "form-control"},
                    month.map(function (data, index) {
                        return React.createElement("option", {key: "option"+index, value: data}, data)
                    })
                ),
                React.createElement("select", {onChange: this.dayChange, className: "form-control"},
                    day.map(function (data, index) {
                        return React.createElement("option", {key: "option"+index, value: data}, data)
                    })
                ),
                React.createElement("input", {onBlur: this.inputBlur, type: "number", className: "form-control"})
            )
        );
    }
});

function loadData(type, year, month, day, toDay) {
    // type: 0 = year, 1 = month, 2 = day, 3 = custom number of days
    // initialize the parameters on the Workload object
    if (month < 10) {
        month = "0" + month;
    }
    if (day < 10) {
        day = "0" + day;
    }
    var fromTime = "";
    var days = 0;
    switch (type) {
        case 0:
            Workload.year = year;
            fromTime = year + "-01-01";
            Workload.days = 365;
            Workload.rectNum = 12;
            break;
        case 1:
            Workload.month = month;
            fromTime = year + "-" + month + "-01";
            Workload.days = 31;
            Workload.rectNum = 31;
            break;
        case 2:
            Workload.day = day;
            fromTime = year + "-" + month + "-" + day;
            Workload.days = 1;
            Workload.rectNum = 24;
            break;
        case 3:
            Workload.days = toDay;
            Workload.rectNum = toDay;
            fromTime = year + "-" + month + "-" + day;
            break;
    }
    var url = "";
    if (Workload.whichRequest == 0) { // 0 = personal workload
        url = "/REST/Domain/getWork/employeeId/" + Workload.id + "/starttime/" + fromTime + "/days/" + Workload.days;
    } else if (Workload.whichRequest == 1) { // 1 = outlet workload
        url = "/REST/Domain/getWorkOfOutlets/outletId/" + Workload.id + "/starttime/" + fromTime + "/days/" + Workload.days;
    }
    Tools.myAjax({
        type: "get",
        url: url,
        success: function (data) {
            // bucket the workload by time
            handleData(data, type);
        },
        error: function (data) {
            console.error(data);
            showDialog("dialog", "错误", "获取工作量错误fromtime:" + fromTime + "day:" + Workload.days, true);
        }
    });
}

function handleData(data, type) {
    var load = [];
    var i = 0;
    if (type == 0) {
        // count the workload for each month of the year
        for (i = 0; i < data.length; i++) {
            var dd = data[i];
            var month = new Date(dd.outTime).getMonth() + 1;
            if (load[month] == undefined) {
                load[month] = 1;
            } else {
                load[month]++;
            }
        }
    } else if (type == 1) { // month
        // count the workload for each day of the month
        for (i = 0; i < data.length; i++) {
            var dd = data[i];
            var day = new Date(dd.outTime).getDate();
            if (load[day] == undefined) {
                load[day] = 1;
            } else {
                load[day]++;
            }
        }
    } else if (type == 2) { // day
        for (i = 0; i < data.length; i++) {
            var dd = data[i];
            var hour = new Date(dd.outTime).getHours();
            if (load[hour] == undefined) {
                load[hour] = 1;
            } else {
                load[hour]++;
            }
        }
    } else if (type == 3) {
        for (i = 0; i < data.length; i++) {
            var dd = data[i];
            var day = new Date(dd.outTime).getDate();
            if (load[day] == undefined) {
                load[day] = 1;
            } else {
                load[day]++;
            }
        }
    }
    // fill empty buckets with 0
    if (type == 3) {
        for (i = 0; i <= parseInt(Workload.day) + parseInt(Workload.days); i++) {
            if (load[i] == undefined) {
                load[i] = 0;
            }
        }
    } else {
        for (i = 0; i <= Workload.rectNum; i++) {
            if (load[i] == undefined) {
                load[i] = 0;
            }
        }
    }
    Workload.Data = load;
    Workload.type = type;
    draw(canvas);
}

// type: 0 = year, 1 = month, 2 = day
// start drawing
function draw(canvas) {
    var context = canvas.getContext("2d");
    context.fillStyle = "#ffffff";
    context.fillRect(0, 0, Workload.width(), Workload.height());
    context.font = "10px Georgia";
    lineXY(context);
    drawCloseButton(context);
    Workload.init();
    myClearInterval();
    drawPic(context);
}

function myClearInterval() {
    // clear all running animations
    for (var i = 0; i < time.length; i++) {
        clearInterval(time[i]);
    }
    time.length = 0;
}

function drawCloseButton(context) {
    var image = new Image();
    image.src = "../images/index/close.png";
    var imagex = Workload.width() - 37;
    var imagey = 5;
    if (image.complete) {
        context.drawImage(image, imagex, imagey);
    } else {
        image.onload = function () {
            context.drawImage(image, imagex, imagey);
        }
    }
}

// loop over every bar
function drawPic(context) {
    for (var i = 0; i <= Workload.rectNum; i++) {
        if (i == 0) {
            continue;
        }
        aniH(context, i, 0);
    }
    open();
}

// animation implementation
// ----- known bug: switching data while an animation is running corrupts the chart
var time = [];
function aniH(context, i, hh) {
    var geadd = Workload.limitY / 2500;
    var getime = 10;
    var signTime = setInterval(function () {
        // use i-1 to drop the slot that i == 0 would otherwise occupy
        createRect(context, Workload.getX(Workload.unitLengthX * ((i - 1) * 2 + 1)), Workload.unitLengthX, hh += getime * geadd, i);
        if (hh >= (Workload.type == 3 ? Workload.Data[Workload.day + i - 1] : Workload.Data[i])) {
            window.clearInterval(signTime);
            createRect(context, Workload.getX(Workload.unitLengthX * ((i - 1) * 2 + 1)), Workload.unitLengthX, hh += getime * geadd, i, true);
        }
    }, 10);
    time.push(signTime);
}

// draw the x and y axes
function lineXY(context) {
    context.beginPath();
    Workload.drawLine(context, Workload.getX(0), Workload.getY(0), Workload.getX(0), Workload.getY(Workload.getYLength()));
    Workload.drawLine(context, Workload.getX(0), Workload.getY(Workload.getYLength()), Workload.getX(0) - Workload.arrowLength, Workload.getY(Workload.getYLength()) + Workload.arrowLength);
    Workload.drawLine(context, Workload.getX(0), Workload.getY(Workload.getYLength()), Workload.getX(0) + Workload.arrowLength, Workload.getY(Workload.getYLength()) + Workload.arrowLength);
    context.fillText("工作量", Workload.getX(0), Workload.getY(Workload.getYLength()) - 5);
    context.stroke();
    Workload.drawLine(context, Workload.getX(0), Workload.getY(0), Workload.getX(Workload.getXLength()), Workload.getY(0));
    Workload.drawLine(context, Workload.getX(Workload.getXLength()), Workload.getY(0), Workload.getX(Workload.getXLength()) - Workload.arrowLength, Workload.getY(0) - Workload.arrowLength);
    Workload.drawLine(context, Workload.getX(Workload.getXLength()), Workload.getY(0), Workload.getX(Workload.getXLength()) - Workload.arrowLength, Workload.getY(0) + Workload.arrowLength);
    context.fillText("时间", Workload.getX(Workload.getXLength()) - 20, Workload.getY(0) - 20);
    context.stroke();
    // draw the axis labels
    // y axis
    context.fillText(Workload.limitY + "", Workload.getX(-20), Workload.getY(Workload.getRectMaxH()));
    context.fillText("啦啦啦", 20, 80);
    context.stroke();
}

// create a bar, attach its mouse handler, and register the handler in the hit-area array
function createRect(context, x, width, height, whichDay, isAddToEvent) {
    // draw each bar as soon as it is created
    var y = height * Workload.unitLengthY();
    if (isAddToEvent) {
        var o = new Object();
        o.x = x;
        o.width = width;
        o.height = height;
        o.whichDay = whichDay;
        o.y = Workload.getY(0) - y;
        o.mouseMove = function (e) {
            switch (Workload.type) {
                case 0:
                    toast.innerHTML = Workload.year + "-" + this.whichDay + " " + Workload.Data[this.whichDay] + "件";
                    break;
                case 1:
                    toast.innerHTML = Workload.year + "-" + Workload.month + "-" + this.whichDay + " " + Workload.Data[this.whichDay] + "件";
                    break;
                case 2:
                    toast.innerHTML = this.whichDay + "点" + Workload.Data[this.whichDay] + "件";
                    break;
                case 3:
                    toast.innerHTML = this.whichDay - 1 + Workload.day + "天" + Workload.Data[this.whichDay - 1 + Workload.day] + "件";
                    break;
            }
            toast.style.display = "block";
            toast.style.left = e.x + 20 + "px";
            toast.style.top = e.y + 20 + "px";
        };
        Workload.mouseEvent.push(o);
    }
    context.fillStyle = "#42b983";
    context.fillRect(x, Workload.getY(0) - y, width, y);
}

// attach events to the DOM
canvas.addEventListener("mousemove", function (e) {
    var onRect = false;
    // find which bar the pointer is over
    for (var i = 0; i < Workload.mouseEvent.length; i++) {
        var x = e.clientX - e.target.offsetLeft;
        var y = e.clientY - e.target.offsetTop;
        var oo = Workload.mouseEvent[i];
        if (x >= oo.x && x <= oo.x + oo.width && y >= oo.y && y <= Workload.getY(0)) {
            oo.mouseMove(e);
            onRect = true;
        }
    }
    // hide the tooltip when the pointer is not over any bar
    if (!onRect) {
        toast.innerHTML = "";
        toast.style.display = "none";
    }
});

canvas.addEventListener("click", function (e) {
    // close when the click lands on the close button
    if (e.clientX - e.target.offsetLeft >= Workload.width() - 37 && e.clientY - e.target.offsetTop <= 37) {
        close();
    }
});

function close() {
    $(canvas).attr("class", "workload_canvas");
    ReactDOM.render(React.createElement(EmptyComponent, null), document.getElementById("workload_input_container"))
}

function open() {
    $(canvas).attr("class", "workload_canvas_on workload_canvas");
}
identifier_name
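The heart of `handleData` in the record above is a counting pass that buckets each record's `outTime` by month, day, or hour. A standalone sketch of that idea (`bucketCounts` is a hypothetical name, not part of the file):

function bucketCounts(data, unit) {
    // pick the calendar component that indexes the bucket
    var pick = {
        month: function (d) { return d.getMonth() + 1; },
        day:   function (d) { return d.getDate(); },
        hour:  function (d) { return d.getHours(); }
    }[unit];
    var load = [];
    for (var i = 0; i < data.length; i++) {
        var key = pick(new Date(data[i].outTime));
        load[key] = (load[key] || 0) + 1;   // count one item in this bucket
    }
    return load; // sparse array indexed by month (1-12), day (1-31), or hour (0-23)
}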
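`aniH` above animates each bar by redrawing it slightly taller on every `setInterval` tick until it reaches the data value. The same pattern, reduced to a sketch with hypothetical names (`animateBar`, `drawBar`):

function animateBar(drawBar, target, step) {
    var h = 0;
    var timer = setInterval(function () {
        h += step;                 // grow by one increment per tick
        drawBar(h);                // repaint the bar at its new height
        if (h >= target) {         // reached the data value: stop animating
            clearInterval(timer);
            drawBar(target);       // final repaint at the exact value
        }
    }, 10);
    return timer;                  // caller can clearInterval(timer) to cancel early
}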
runningView.js
define([
    '../../config/configTopics',
    '../tool',
    '../../lib/lib',
    '../command',
    '../dataFetch',
    '../../lib/underscore'],
function(topics, tool, lib, command, dataFetch, _, tip){
    // NOTE: `tip` has no matching entry in the dependency list above and will be undefined
    var privateMethods,
        publicMethods,
        STAR_FLAG = 'star-';
    privateMethods = {
        conditionNodes: {
        },
        starRender: {
            /*continueRight: function(userData, condition){
                var numId = STAR_FLAG + name+'-num',
                    imageId = STAR_FLAG + name+'-image',
                    numNodes = nodes[numId],
                    imageNodes = nodes[imageId];
                if(!numNodes){
                    numNodes = lib.g(numId);
                }
                if(!imageNodes){
                    imageNodes = lib.g(imageId);
                }
                numNodes.textContent = userData[name];
                imageNodes.style.bottom = Math.floor(userData[name]/condition.value*100) + '%';
            },*/
            quickAnswer: function(userData, condition){
                /*var nodes = privateMethods.conditionNodes,
                    id = STAR_FLAG + 'quickAnswer' + condition.qualifiedTime
                    numId = STAR_FLAG + condition. +'-num',
                    imageId = STAR_FLAG + name+'-image',
                    numNodes = nodes[numId],
                    imageNodes = nodes[imageId];*/
            },
            allRight: function(userData, condition){
                //numNodes.textContent = userData[name] ? '全部答对' : '未全部答对';
                //imageNodes.style.bottom =
                //    (userData[name] ? 100 : 0) + '%';
            }
        },
        conditionMethods: {
            //starTpl: _.template(lib.content('tpl-star')),
            right: function(value){
                var data = {
                    title: '答对',
                    intro: '答对' + value + '题'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            continueRight: function(value){
                var data = {
                    title: '连对'+value+'题'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            allRight: function(value){
                var data = {
                    title: '全部答对'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            timeout: function(value){
                var data = {
                    title: '超时',
                    intro: '超时少于' + value + '题'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            quickAnswer: function(value, condition){
                var qualifiedTime = condition.qualifiedTime,
                    title = qualifiedTime <= 3 ? '秒答' : qualifiedTime + '秒答对',
                    data = {
                        title: title,
                        intro: title + '题数达到' + value
                    }
                return data;
                //return lib.str2dom(this.starTpl(data));
            }
        },
        nodes: {
            submit: lib.g('game-submit'),
            reset: lib.g('reset-topic'),
            back: lib.g('back-choice'),
            main: lib.g('to-main'),
            details: lib.g('game-details'),
            options: lib.g('game-options'),
            answerTip: lib.g('game-tip'),
            panel: lib.g('game-scene'),
            title: lib.g('game-title'),
            nextTopic: lib.g('game-next'),
            majorPass: lib.g('major-pass'),
            starPass: lib.g('star-pass'),
            countdownStep: lib.g('countdown-step')
        },
        render: function(dom, data){
            if(data.hide!==undefined){
                lib.hide(dom);
            }
            if(data.text!==undefined){
                dom.textContent = data.text;
            }
            if(data.html!==undefined){
                dom.innerHTML = data.html;
            }
        },
        extendCondition: function(conditions){
            var methods = this.conditionMethods;
            if(conditions.major){
                conditions.major.forEach(function(condition){
                    var renderData =
                    var renderData = methods[condition.name](condition.value, condition);
                    lib.extend(condition, renderData);
                });
            }
        },
        scene: {},
        tipObj: {},
        tipPanel: lib.g('window-tip')
    }

    publicMethods = {
        hasInitEvents: false,
        initSet: function(id, ev, events, context){
            privateMethods.scene[id] = {
                ev: ev,
                context: context
            };
            //lib
        },
        changeTo: function(id){
            var nodes = privateMethods.nodes,
                scene = privateMethods.scene[id],
                events = scene.ev,
                context = scene.context;
            Object.keys(events).forEach(function(ev){
                var data = events[ev],
                    node = nodes[ev];
                node.onclick = data.click ? function(){
                    data.click.call(context);
                } : null;
                // shown by default
                lib[ data.hide ? 'hide' : 'show' ](node);
            });
        },
        render: function(data){
            var nodes = privateMethods.nodes,
                render = privateMethods.render;
            Object.keys(data).forEach(function(name){
                render(nodes[name], data[name]);
            });
        },
        renderRemindItems: function(userData, conditions){
            var vm = this.remindVm;
            conditions.forEach(function(condition){
                vm[condition](userData[condition]);
            });
        },
        renderCondition: function(userData, conditions){
            var vm = this.starViewModel,
                map = this.starMap,
                commonViewModel = this.commonViewModel,
                newValue;
            Object.keys(userData).forEach(function(key){
                var indexData = map[key];
                newValue = userData[key]
                if(indexData){
                    var typeData = vm[indexData.type]();
                    typeData[indexData.index].userValue(newValue);
                }
                if(commonViewModel[key]){
                    commonViewModel[key](newValue);
                }
            });
            return;
        },
        /**
         * Shows one of the overlay popups
         * @param {string} id  the overlay's id
         * @param {object} data  render data for the overlay
         * @param {object} events  methods the model layer exposes for the view to call
         * @return {object} the overlay object
         */
        showTip: function(id, data, events){
            var tipObj = privateMethods.tipObj[id],
                panel = privateMethods.tipPanel,
                detail, tpl, toolVm;
            if(!tipObj){
                tipObj = privateMethods.tipObj[id] = {};
                tipObj.init = true;
                tipObj.tpl = _.template(lib.content(id+'-tpl'));
                tipObj.detail = lib.g(id+'-detail');
                tipObj.toolVm = {
                    hasPass: ko.observable(data.hasPass)
                }
                lib.g(id+'-tool').onclick = function(e){
                    var target = e.target,
                        clickEvent;
                    if(target&&(clickEvent=lib.attr(target, 'pt-click'))){
                        events[clickEvent]&&events[clickEvent].call(events);
                    }
                }
                ko.applyBindings(tipObj.toolVm, lib.g(id+'-tool'));
            }
            tpl = tipObj.tpl;
            detail = tipObj.detail;
            privateMethods.extendCondition(data)
            detail.innerHTML = tpl(data);
            if(privateMethods.tipOldId){
                lib.removeClass(panel, privateMethods.tipOldId);
            }
            lib.addClass(panel, id);
            privateMethods.tipOldId = id;
            lib.show(panel);
            toolVm = tipObj.toolVm;
            Object.keys(data).forEach(function(d){
                if(d in toolVm){
                    toolVm[d](data[d]);
                }
            });
            return tipObj;
        },
        hideTip: function(){
            lib.hide(privateMethods.tipPanel);
        },
        starViewModel: null,
        commonViewModel: null,
        starMap: {},
        initPassConditon: function(data, userData){
            var conditionData = {},
                map = this.starMap = {},
                methods = privateMethods.conditionMethods;
            for(var key in data){
                if(data.hasOwnProperty(key)){
                    var cd = conditionData[key] = [];
                    data[key].forEach(function(condition, index){
                        var renderData = methods[condition.name](condition.value, condition);
                        lib.extend(renderData, condition);
                        renderData.userValue = ko.observable(userData[renderData.id]);
                        renderData.percent = ko.computed(function() {
                            var per = this.userValue()/this.value;
                            return -(100-Math.floor((per>1?1:per)*100))+"%" ;
                        }, renderData);
                        cd.push(renderData);
                        map[renderData.id] = {
                            type: key,
                            index: index
                        }
                    });
                }
            }
            lib[ data.star ? 'show' : 'hide' ](privateMethods.nodes.starPass);
            if(!this.starViewModel){
                this.starViewModel = {
                    major: ko.observableArray(conditionData.major),
                    star: ko.observableArray(conditionData.star)
                }
                ko.applyBindings(this.starViewModel, lib.g('star-panel'));
                var commonViewModel = this.commonViewModel = {};
                for(var key in userData){
                    if(userData.hasOwnProperty(key)){
                        commonViewModel[key] = ko.observable(userData[key]);
                    }
                }
                ko.applyBindings(commonViewModel, lib.g('game-command'));
            }else{
                var vmMajor = this.starViewModel.major,
                    vmStar = this.starViewModel.star,
                    len = Number.MAX_VALUE;
                vmMajor.splice.apply(vmMajor, [0,len].concat(conditionData.major));
                vmStar.splice.apply(vmStar, [0,len].concat(conditionData.star));
            }
        },
        initOverConditon: function(){
        },
        remindVm: null,
        initRemindItems: function(items, userData){
            var vm = this.remindVm;
            if(!vm){
                vm = this.remindVm = {
                    right: ko.observable(undefined),
                    continueRight: ko.observable(undefined)
                }
                ko.applyBindings(vm, lib.g('remind-items'));
            }
            items.forEach(function(item){
                if(vm[item]){
                    vm[item](userData[item]);
                }
            });
        },
        initCondition: function(data, userData){
            if(data.major||data.star){
                this.initPassConditon(data, userData);
                lib.show(lib.g('star-panel'));
            }else{
                lib.hide(lib.g('star-panel'));
            }
            if(data.remindItems){
                this.initRemindItems(data.remindItems, userData);
                lib.show(lib.g('remind-items'));
            }else{
                lib.hide(lib.g('remind-items'));
            }
        },
        processViewModel: null,
        renderProcess: function(data){
            if(data.totalTime!==undefined||data.restTime!==undefined){
                if(!this.processViewModel){
                    this.processViewModel = {
                        restTime: ko.observable(data.restTime),
                        totalTime: ko.observable(data.totalTime)
                    }
                    this.processViewModel.per = ko.computed(function() {
                        return this.restTime()/this.totalTime()*100;
                    }, this.processViewModel);
                    this.processViewModel.showTime = ko.computed(function() {
                        var restTime = this.restTime(),
                            timeTip = (restTime%60) + '秒';
                        if(restTime>59){
                            timeTip = Math.floor(restTime/60) + '分' + timeTip;
                        }
                        return timeTip;
                    }, this.processViewModel);
                    ko.applyBindings(this.processViewModel, lib.g('process-time-wrap'));
                }else{
                    var vm = this.processViewModel;
                    if(data.restTime){
                        vm.restTime(data.restTime);
                    }
                    if(data.totalTime){
                        vm.totalTime(data.totalTime);
                    }
                }
            }
            return;
            // NOTE: the step-based progress code below is unreachable (kept from an earlier version)
            if(data.maxStep){
                if(!this.processViewModel){
                    this.processViewModel = {
                        maxStep: ko.observable(data.maxStep),
                        step: ko.observable(data.step)
                    }
                    this.processViewModel.per = ko.computed(function() {
                        return Math.floor(this.step()/this.maxStep()*100);
                    }, this.processViewModel);
                    ko.applyBindings(this.processViewModel, lib.g('process-wrap'));
                }else{
                    var vm = this.processViewModel;
                    if(data.maxStep){
                        vm.maxStep(data.maxStep);
                    }
                    if(data.step){
                        vm.step(data.step);
                    }
                }
            }
        },
        renderCountdown: function(time){
            privateMethods.nodes.countdownStep.textContent = time;
        },
        getView: function(name){
            return privateMethods.nodes[name];
        }
    }
    return lib.superClass(privateMethods, publicMethods);
});
methods[condition.name](condition.value, condition); lib.extend(condition, renderData); }); } if(conditions.star){ conditions.star.forEach(function(condition){
conditional_block
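The one non-obvious calculation in the record above is the `percent` computed in `initPassConditon`: completion is `userValue/value`, capped at 1, and expressed as a negative percentage offset for the fill graphic. A standalone sketch using only the `ko` observable API the file already uses; the sample values are invented:

var condition = {
    value: 10,                      // target value for the condition
    userValue: ko.observable(4)     // player's current progress
};
condition.percent = ko.computed(function () {
    var per = condition.userValue() / condition.value;
    // cap at 100% and express as a negative offset, e.g. 4/10 -> "-60%"
    return -(100 - Math.floor((per > 1 ? 1 : per) * 100)) + '%';
}, condition);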
runningView.js
define([
    '../../config/configTopics',
    '../tool',
    '../../lib/lib',
    '../command',
    '../dataFetch',
    '../../lib/underscore'],
function(topics, tool, lib, command, dataFetch, _, tip){
    // NOTE: `tip` has no matching entry in the dependency list above and will be undefined
    var privateMethods,
        publicMethods,
        STAR_FLAG = 'star-';
    privateMethods = {
        conditionNodes: {
        },
        starRender: {
            /*continueRight: function(userData, condition){
                var numId = STAR_FLAG + name+'-num',
                    imageId = STAR_FLAG + name+'-image',
                    numNodes = nodes[numId],
                    imageNodes = nodes[imageId];
                if(!numNodes){
                    numNodes = lib.g(numId);
                }
                if(!imageNodes){
                    imageNodes = lib.g(imageId);
                }
                numNodes.textContent = userData[name];
                imageNodes.style.bottom = Math.floor(userData[name]/condition.value*100) + '%';
            },*/
            quickAnswer: function(userData, condition){
                /*var nodes = privateMethods.conditionNodes,
                    id = STAR_FLAG + 'quickAnswer' + condition.qualifiedTime
                    numId = STAR_FLAG + condition. +'-num',
                    imageId = STAR_FLAG + name+'-image',
                    numNodes = nodes[numId],
                    imageNodes = nodes[imageId];*/
            },
            allRight: function(userData, condition){
                //numNodes.textContent = userData[name] ? '全部答对' : '未全部答对';
                //imageNodes.style.bottom =
                //    (userData[name] ? 100 : 0) + '%';
            }
        },
        conditionMethods: {
            //starTpl: _.template(lib.content('tpl-star')),
            right: function(value){
                var data = {
                    title: '答对',
                    intro: '答对' + value + '题'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            continueRight: function(value){
                var data = {
                    title: '连对'+value+'题'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            allRight: function(value){
                var data = {
                    title: '全部答对'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            timeout: function(value){
                var data = {
                    title: '超时',
                    intro: '超时少于' + value + '题'
                }
                return data;
                //return lib.str2dom(this.starTpl(data));
            },
            quickAnswer: function(value, condition){
                var qualifiedTime = condition.qualifiedTime,
                    title = qualifiedTime <= 3 ? '秒答' : qualifiedTime + '秒答对',
                    data = {
                        title: title,
                        intro: title + '题数达到' + value
                    }
                return data;
                //return lib.str2dom(this.starTpl(data));
            }
        },
        nodes: {
            submit: lib.g('game-submit'),
            reset: lib.g('reset-topic'),
            back: lib.g('back-choice'),
            main: lib.g('to-main'),
            details: lib.g('game-details'),
            options: lib.g('game-options'),
            answerTip: lib.g('game-tip'),
            panel: lib.g('game-scene'),
            title: lib.g('game-title'),
            nextTopic: lib.g('game-next'),
            majorPass: lib.g('major-pass'),
            starPass: lib.g('star-pass'),
            countdownStep: lib.g('countdown-step')
        },
        render: function(dom, data){
            if(data.hide!==undefined){
                lib.hide(dom);
            }
            if(data.text!==undefined){
                dom.textContent = data.text;
            }
            if(data.html!==undefined){
                dom.innerHTML = data.html;
            }
        },
        extendCondition: function(conditions){
            var methods = this.conditionMethods;
            if(conditions.major){
                conditions.major.forEach(function(condition){
                    var renderData = methods[condition.name](condition.value, condition);
                    lib.extend(condition, renderData);
                });
            }
            if(conditions.star){
                conditions.star.forEach(function(condition){
                    var renderData = methods[condition.name](condition.value, condition);
                    lib.extend(condition, renderData);
                });
            }
        },
        scene: {},
        tipObj: {},
        tipPanel: lib.g('window-tip')
    }

    publicMethods = {
        hasInitEvents: false,
        initSet: function(id, ev, events, context){
            privateMethods.scene[id] = {
                ev: ev,
                context: context
changeTo: function(id){ var nodes = privateMethods.nodes, scene = privateMethods.scene[id], events = scene.ev, context = scene.context; Object.keys(events).forEach(function(ev){ var data = events[ev], node = nodes[ev]; node.onclick = data.click ? function(){ data.click.call(context); } : null; // shown by default lib[ data.hide ? 'hide' : 'show' ](node); }); }, render: function(data){ var nodes = privateMethods.nodes, render = privateMethods.render; Object.keys(data).forEach(function(name){ render(nodes[name], data[name]); }); }, renderRemindItems: function(userData, conditions){ var vm = this.remindVm; conditions.forEach(function(condition){ vm[condition](userData[condition]); }); }, renderCondition: function(userData, conditions){ var vm = this.starViewModel, map = this.starMap, commonViewModel = this.commonViewModel, newValue; Object.keys(userData).forEach(function(key){ var indexData = map[key]; newValue = userData[key]; if(indexData){ var typeData = vm[indexData.type](); typeData[indexData.index].userValue(newValue); } if(commonViewModel[key]){ commonViewModel[key](newValue); } }); }, /** * Shows the various overlay dialogs * @param {string} id id of the overlay * @param {object} data render data for the overlay * @param {object} events methods provided by the model layer for the view to call * @return {object} the overlay object */ showTip: function(id, data, events){ var tipObj = privateMethods.tipObj[id], panel = privateMethods.tipPanel, detail, tpl, toolVm; if(!tipObj){ tipObj = privateMethods.tipObj[id] = {}; tipObj.init = true; tipObj.tpl = _.template(lib.content(id+'-tpl')); tipObj.detail = lib.g(id+'-detail'); tipObj.toolVm = { hasPass: ko.observable(data.hasPass) } lib.g(id+'-tool').onclick = function(e){ var target = e.target, clickEvent; if(target&&(clickEvent=lib.attr(target, 'pt-click'))){ events[clickEvent]&&events[clickEvent].call(events); } } ko.applyBindings(tipObj.toolVm, lib.g(id+'-tool')); } tpl = tipObj.tpl; detail = tipObj.detail; privateMethods.extendCondition(data); detail.innerHTML = tpl(data); if(privateMethods.tipOldId){ lib.removeClass(panel, privateMethods.tipOldId); } lib.addClass(panel, id); privateMethods.tipOldId = id; lib.show(panel); toolVm = tipObj.toolVm; Object.keys(data).forEach(function(d){ if(d in toolVm){ toolVm[d](data[d]); } }); return tipObj; }, hideTip: function(){ lib.hide(privateMethods.tipPanel); }, starViewModel: null, commonViewModel: null, starMap: {}, initPassCondition: function(data, userData){ var conditionData = {}, map = this.starMap = {}, methods = privateMethods.conditionMethods; for(var key in data){ if(data.hasOwnProperty(key)){ var cd = conditionData[key] = []; data[key].forEach(function(condition, index){ var renderData = methods[condition.name](condition.value, condition); lib.extend(renderData, condition); renderData.userValue = ko.observable(userData[renderData.id]); renderData.percent = ko.computed(function() { var per= this.userValue()/this.value; return -(100-Math.floor((per>1?1:per)*100))+"%" ; }, renderData); cd.push(renderData); map[renderData.id] = { type: key, index: index } }); } } lib[ data.star ?
}; //lib },
random_line_split
p6.py
# coding: utf-8 # ## Question 6 # ### Ali Mortazavi # ### 96131044 # # In many pattern recognition applications, Sample Generation plays an important role, where it is # necessary to generate samples which are to be normally distributed according to a given expected # vector and a covariance matrix.<br> # In this problem, you are going to do this technique yourself. You will also practice some more # complicated matrix operations as well. # In[1]: import random import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import datasets from sklearn.decomposition import PCA import pandas as pd from numpy.linalg import inv import seaborn as sns from numpy import linalg np.set_printoptions(suppress=True) # #### a) Generate samples from three normal distributions specified by the following parameters: <br> # $n=1,\ N=500,\ \mu=500,\ \sigma=1,2,3$ <br> # Plot the samples, as well as the histograms associated with each of the distributions. # Compare the results. # In[2]: mu = 500 sigmas = [1,2,3] colors = ["orange", "blue", "green"] N = 500 bins = 50 print ("HISTOGRAM and Data Points") for i in range(0, len(sigmas)): s = np.random.normal(mu, sigmas[i], N) print ("mu", mu, "Sigma", sigmas[i]) sns.distplot(s, color=colors[i], bins=bins) plt.scatter (s, np.zeros(len(s)), s=0.2, color=colors[i]) plt.xlim([485, 515]) plt.show() # # for i in range(0, len(sigmas)): # s = np.random.normal(mu, sigmas[i], N) # print ("mu", mu, "Sigma", sigmas[i]) # plt.scatter (s, np.zeros(len(s)), s=0.1, color=colors[i]) # plt.xlim([485, 515]) # plt.show() # #### b) Generate samples from a normal distribution specified by the following parameters: <br> # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 1 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 2 & 1 \\ # 1 & 3 # \end{bmatrix}$ <br> # Display the samples, as well as the associated contour plot. # In[3]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() bins= 20 (counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() samples_for_f = samples # #### c. Consider a normal distribution specified by the following parameters: # $ n = 2, N = 500, M = \begin{bmatrix} # m_1 \\ # m_2 # \end{bmatrix}, \Sigma = \begin{bmatrix} # \sigma_{11} & \sigma_{12} \\ # \sigma_{21} & \sigma_{22} # \end{bmatrix}$ <br> # Determine appropriate values for each of the unknown variables, so that the shape of the # distribution becomes: <br> # > c.1) A circle in the upper left of the Euclidean coordinate system. <br> # > c.2) A diagonal line (/ shape) in the centre<br> # > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br> # # # Display the generated samples. <br> # c.1) A circle in the upper left of the Euclidean coordinate system: # $$ m_1 < 0 , m_2 > 0, \Sigma = I $$ # In[4]: def
(): plt.xlim([-10, 10]) plt.ylim([-10, 10]) ax = plt.gca() ax.spines['top'].set_color('none') ax.spines['bottom'].set_position('zero') ax.spines['left'].set_position('zero') ax.spines['right'].set_color('none') # In[5]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N) setting_function() plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.2) A diagonal line (/ shape) in the centre<br> # We have to select M to be $(0,0)$ to be in the centre. <br> # To make the distribution look like a / line, we will choose $\Sigma$ so that the eigenvector of $\Sigma$ with the largest eigenvalue points in the / direction (parallel to $(1,1)$) and its second eigenvector points along $(-1, 1)$. <br> # To make the distribution similar to a diagonal line, we will choose $\lambda_1=10, \lambda_2=1 $ # # In[6]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system # # In[7]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # #### d) Consider a random variable with # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 3 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 1 & 2\rho \\ # 2 \rho & 4 # \end{bmatrix}$ <br> # #### compute $d^2(x)$ analytically, if the parameters are: # $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$ # If $ \Sigma^{-1} = \begin{bmatrix} # a & b \\ # c & d # \end{bmatrix}$ <br> then (with $m=(2,3)$, and $b=c$ by symmetry): # $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (b+c)x_1x_2 + dx_2^2 + (-4a -3(b+c))x_1 + (-2(b+c) -6d)x_2 + (4a + 6(b+c) + 9d) $ <br> # for $\rho =-0.99$: # $$ d^2(x) \approx 50.25x_1^2 + 49.75x_1x_2 + 12.56x_2^2 - 350.25x_1 - 174.87x_2 + 612.56 $$ # In[8]: levels=[4,9,16] plt.xlim(-10,10) plt.ylim(-10,10) X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1] xy = np.vstack((X.flatten(), Y.flatten())).T Sigmas = [] vals = [-0.99, -0.5, 0.5, 0.99] for val in vals: Sigmas.append(np.array([[1,2*val],[2*val,4]])) mu = np.array([2,3]) def d_squared (xy, sigma, mu): matrix = inv(sigma) a = xy - mu t = np.matmul(a,matrix) return np.matmul(t, a.T) for sigma in Sigmas: print ("Sigma is") print (sigma) print ("Formula") t= inv(sigma) a = t[0][0] b = t[0][1] c = t[1][0] d = t[1][1] s = str(a) + "x^2 + " + str(b+c) + "xy + " + str(d) + "y^2 + " + str(-4*a-3*(b+c)) + "x + " + str(-2*(b+c)-6*d) + "y + " + str(4*a+6*(b+c)+9*d) print (s) Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu) Z = np.reshape(Z, (len(X), -1)) plt.contour(X,Y,Z, levels=[4,9,16]) plt.show() # #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results.
# In[9]: #Just repeating part b: N =5000 samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() bins= 10 plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # In[10]: estimated_mean = np.mean(samples, axis=0) print ("estimated mean", estimated_mean) print ("real mean [2,1]") estimated_var = 0 for i in range(0, len(samples)): vec = np.array([samples[i]-estimated_mean]) tmp = np.matmul(vec.T, vec) estimated_var += tmp estimated_var /= (len(samples)-1) print ("estimated sigma") print (estimated_var) print ("real sigma:") print (np.array([[2,1],[1,3]]) ) # Comment: The estimated mean and sigma are close to the real mean and real sigma. <br> # Since these estimators are consistent and unbiased, the estimates become close to the real values as the number of samples becomes large enough. # #### g) Simultaneously diagonalise $\sigma$ and $\hat{\sigma}$ , and form a vector $ V = [\lambda_1, \lambda_2]^T $ # # In[11]: real_sigma = np.array([[2,1],[1,3]]) # First, whiten the estimated sigma: estimated_sigma = estimated_var w, v = linalg.eig(estimated_sigma) normalizer = np.diag(np.sqrt(1/w)) first_transformation = np.matmul(v, normalizer) print ("first transformation") print (first_transformation) print ("checking whether it transforms sigma to I") estimated_sigma_2 = np.matmul(first_transformation.T,( np.matmul(estimated_sigma, first_transformation))) print (estimated_sigma_2) print (v) first_transformation = first_transformation.T # Test on sampled data whiten_samples = [] for i in range(0, len(samples)): whiten_samples.append(np.matmul(first_transformation, samples[i])) whiten_samples = np.asarray(whiten_samples) # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') plt.scatter (whiten_samples[:,0], whiten_samples[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(whiten_samples[:, 0], whiten_samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # In[12]: #now for the real sigma # calculate real sigma after first transformation real_sigma_2 = np.matmul(first_transformation, np.matmul(real_sigma, first_transformation.T)) w,v = linalg.eig(real_sigma_2) second_transformation = v.T #checking whether this transformation works properly or not real_sigma_3 = np.matmul(second_transformation, np.matmul(real_sigma_2, second_transformation.T)) print ("real sigma after transformation") print (real_sigma_3) # Now check that this transformation makes no change in the estimated sigma: print ("estimated sigma after transformation") estimated_sigma_3 = np.matmul(second_transformation, np.matmul(estimated_sigma_2, second_transformation.T)) print (estimated_sigma_3) # In[13]: #So we have to apply this new transformation to the sample data transformed_whiten = [] for i in range(0, len(whiten_samples)): transformed_whiten.append(np.matmul(second_transformation, whiten_samples[i])) transformed_whiten = np.asarray(transformed_whiten) plt.gca().set_aspect('equal') plt.scatter (transformed_whiten[:,0], transformed_whiten[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5])
plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(transformed_whiten[:, 0], transformed_whiten[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # V = [1.02404447 1.0067168 ] # #### h) Find a transformation for covariance matrix of the distribution in part b., such that when applied on the data, the covariance matrix of the transformed data becomes I . Transform the data and display the distribution in the new space. # In[14]: w,v = linalg.eig(real_sigma) normalizer = np.diag(np.sqrt(1/w)) transformation = (np.matmul(v, normalizer)).T transformed_version = [] for i in range(0, len(samples)): transformed_version.append(np.matmul(transformation, samples[i])) transformed_version = np.asarray(transformed_version) plt.gca().set_aspect('equal') plt.scatter (transformed_version[:,0], transformed_version[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(transformed_version[:, 0], transformed_version[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # #### i) Calculate the eigenvalues and eigenvectors associated with the covariance matrix of the distribution in part b. Plot the eigenvectors. What can you infer from them? # # $$ det (\begin{bmatrix} # 2-\lambda & 1 \\ # 1 & 3 - \lambda # \end{bmatrix}) = 0 \rightarrow \lambda^2 - 5\lambda + 5 =0 \rightarrow \lambda_1 = 1.38, \lambda_2= 3.62 $$ # $$ v_1= (-0.85, 0.52) , v_2 = (-0.52, -0.85) $$ # In[15]: w,v = linalg.eig(real_sigma) N =5000 samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') plt.scatter (samples[:,0], samples[:,1], s=0.2) magnitude = -2 plt.plot([2,2 + magnitude * w[0] * v[0][0]], [1, 1+magnitude * w[0] * v[1][0]], color="red") plt.plot([2,2 +magnitude * w[1] * v[0][1]], [1, 1+magnitude * w[1] *v[1][1]], color="yellow") plt.show() bins= 10 # As you can see, the eigenvectors point along the axes of the ellipsoid. The one with the bigger eigenvalue points along the semi-major axis. # #### j) Again, consider the distribution and samples you generated in part b. Construct a 2*2 matrix P , which has eigenvectors associated with $\Sigma$ as its columns. Project your generated samples to a new space using $Y_i = (X_i -M)*P $, and plot the samples. What differences do you notice? # In[16]: P = np.array([[v[0][1], v[0][0]],[v[1][1], v[1][0]]]) new_samples = [] mean = np.array([2,1]) print (np.matmul((samples[0]-mean), P)) for i in range(0, len(samples)): new_samples.append(np.matmul((samples[i]-mean), P)) new_samples = np.asarray(new_samples) plt.xlim([-10,10]) plt.ylim([-10,10]) plt.gca().set_aspect('equal') plt.scatter (new_samples[:,0], new_samples[:,1], s=0.2) plt.show() # The difference between this transformation and the previous one is that this transformation multiplies the sample vector on the **right side**, while the previous one multiplied it on the **left side**.<br> # So we can use the **eigenvector matrix** as a right-side transform, or the **transpose of the eigenvector matrix** as a left-side transform. # #### k) Find the covariance matrix associated with the projected samples in part h. Also calculate its eigenvalues and eigenvectors, and comment on the results.
# In[17]: estimated_mean = np.mean(transformed_version, axis=0) print ("estimated mean \n", estimated_mean) estimated_var = 0 for i in range(0, len(transformed_version)): vec = np.array([transformed_version[i]-estimated_mean]) tmp = np.matmul(vec.T, vec) estimated_var += tmp estimated_var /= (len(samples)-1) print ("estimated sigma") print (estimated_var) # In[18]: w,v =linalg.eig(estimated_var) print (w) print (v) # Since this covariance matrix is essentially the identity matrix, its eigenvalues are 1 and its eigenvectors are (1,0) and (0,1)
setting_function
identifier_name
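# Editor's note: the whitening construction used in parts g) and h) above is easy to
# double-check numerically. A minimal self-contained sketch (my addition, not part of
# the original notebook; it assumes only numpy): build T = Lambda^{-1/2} V^T from the
# eigendecomposition of Sigma and check that the covariance of the transformed
# samples is close to the identity.
import numpy as np

rng = np.random.default_rng(0)
sigma = np.array([[2.0, 1.0], [1.0, 3.0]])
samples = rng.multivariate_normal([2.0, 1.0], sigma, size=5000)

w, v = np.linalg.eig(sigma)                      # eigenvalues w, eigenvectors in the columns of v
transform = (v @ np.diag(np.sqrt(1.0 / w))).T    # same construction as in the notebook
whitened = samples @ transform.T                 # apply T to every sample (row convention)

print(np.cov(whitened.T))                        # should be close to the 2x2 identity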
p6.py
# coding: utf-8 # ## Question 6 # ### Ali Mortazavi # ### 96131044 # # In many pattern recognition applications, Sample Generation plays an important role, where it is # necessary to generate samples which are to be normally distributed according to a given expected # vector and a covariance matrix.<br> # In this problem, you are going to do this technique yourself. You will also practice some more # complicated matrix operations as well. # In[1]: import random import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import datasets from sklearn.decomposition import PCA import pandas as pd from numpy.linalg import inv import seaborn as sns from numpy import linalg np.set_printoptions(suppress=True) # #### a) Generate samples from three normal distributions specified by the following parameters: <br> # $n=1,\ N=500,\ \mu=500,\ \sigma=1,2,3$ <br> # Plot the samples, as well as the histograms associated with each of the distributions. # Compare the results. # In[2]: mu = 500 sigmas = [1,2,3] colors = ["orange", "blue", "green"] N = 500 bins = 50 print ("HISTOGRAM and Data Points") for i in range(0, len(sigmas)):
# # for i in range(0, len(sigmas)): # s = np.random.normal(mu, sigmas[i], N) # print ("mu", mu, "Sigma", sigmas[i]) # plt.scatter (s, np.zeros(len(s)), s=0.1, color=colors[i]) # plt.xlim([485, 515]) # plt.show() # #### b) Generate samples from a normal distribution specified by the following parameters: <br> # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 1 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 2 & 1 \\ # 1 & 3 # \end{bmatrix}$ <br> # Display the samples, as well as the associated contour plot. # In[3]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() bins= 20 (counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() samples_for_f = samples # #### c. Consider a normal distribution specified by the following parameters: # $ n = 2, N = 500, M = \begin{bmatrix} # m_1 \\ # m_2 # \end{bmatrix}, \Sigma = \begin{bmatrix} # \sigma_{11} & \sigma_{12} \\ # \sigma_{21} & \sigma_{22} # \end{bmatrix}$ <br> # Determine appropriate values for each of the unknown variables, so that the shape of the # distribution becomes: <br> # > c.1) A circle in the upper left of the Euclidean coordinate system. <br> # > c.2) A diagonal line (/ shape) in the centre<br> # > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br> # # # Display the generated samples. <br> # c.1) A circle in the upper left of the Euclidean coordinate system: # $$ m_1 < 0 , m_2 > 0, \Sigma = I $$ # In[4]: def setting_function (): plt.xlim([-10, 10]) plt.ylim([-10, 10]) ax = plt.gca() ax.spines['top'].set_color('none') ax.spines['bottom'].set_position('zero') ax.spines['left'].set_position('zero') ax.spines['right'].set_color('none') # In[5]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N) setting_function() plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.2) A diagonal line (/ shape) in the centre<br> # We have to select M to be $(0,0)$ to be in the centre. <br> # To make the distribution look like a / line, we will choose $\Sigma$ so that the eigenvector of $\Sigma$ with the largest eigenvalue points in the / direction (parallel to $(1,1)$) and its second eigenvector points along $(-1, 1)$.
<br> # To make the distribution similar to a diagonal line, we will choose $\lambda_1=10, \lambda_2=1 $ # # In[6]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system # # In[7]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # #### d) Consider a random variable with # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 3 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 1 & 2\rho \\ # 2 \rho & 4 # \end{bmatrix}$ <br> # #### compute $d^2(x)$ analytically, if the parameters are: # $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$ # If $ \Sigma^{-1} = \begin{bmatrix} # a & b \\ # c & d # \end{bmatrix}$ <br> then (with $m=(2,3)$, and $b=c$ by symmetry): # $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (b+c)x_1x_2 + dx_2^2 + (-4a -3(b+c))x_1 + (-2(b+c) -6d)x_2 + (4a + 6(b+c) + 9d) $ <br> # for $\rho =-0.99$: # $$ d^2(x) \approx 50.25x_1^2 + 49.75x_1x_2 + 12.56x_2^2 - 350.25x_1 - 174.87x_2 + 612.56 $$ # In[8]: levels=[4,9,16] plt.xlim(-10,10) plt.ylim(-10,10) X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1] xy = np.vstack((X.flatten(), Y.flatten())).T Sigmas = [] vals = [-0.99, -0.5, 0.5, 0.99] for val in vals: Sigmas.append(np.array([[1,2*val],[2*val,4]])) mu = np.array([2,3]) def d_squared (xy, sigma, mu): matrix = inv(sigma) a = xy - mu t = np.matmul(a,matrix) return np.matmul(t, a.T) for sigma in Sigmas: print ("Sigma is") print (sigma) print ("Formula") t= inv(sigma) a = t[0][0] b = t[0][1] c = t[1][0] d = t[1][1] s = str(a) + "x^2 + " + str(b+c) + "xy + " + str(d) + "y^2 + " + str(-4*a-3*(b+c)) + "x + " + str(-2*(b+c)-6*d) + "y + " + str(4*a+6*(b+c)+9*d) print (s) Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu) Z = np.reshape(Z, (len(X), -1)) plt.contour(X,Y,Z, levels=[4,9,16]) plt.show() # #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results. # In[9]: #Just repeating part b: N =5000 samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() bins= 10 plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # In[10]: estimated_mean = np.mean(samples, axis=0) print ("estimated mean", estimated_mean) print ("real mean [2,1]") estimated_var = 0 for i in range(0, len(samples)): vec = np.array([samples[i]-estimated_mean]) tmp = np.matmul(vec.T, vec) estimated_var += tmp estimated_var /= (len(samples)-1) print ("estimated sigma") print (estimated_var) print ("real sigma:") print (np.array([[2,1],[1,3]]) ) # Comment: The estimated mean and sigma are close to the real mean and real sigma. <br> # Since these estimators are consistent and unbiased, the estimates become close to the real values as the number of samples becomes large enough.
# #### g) Simultaneously diagonalise $\sigma$ and $\hat{\sigma}$ , and form a vector $ V = [\lambda_1, \lambda_2]^T $ # # In[11]: real_sigma = np.array([[2,1],[1,3]]) # First, whiten the estimated sigma: estimated_sigma = estimated_var w, v = linalg.eig(estimated_sigma) normalizer = np.diag(np.sqrt(1/w)) first_transformation = np.matmul(v, normalizer) print ("first transformation") print (first_transformation) print ("checking whether it transforms sigma to I") estimated_sigma_2 = np.matmul(first_transformation.T,( np.matmul(estimated_sigma, first_transformation))) print (estimated_sigma_2) print (v) first_transformation = first_transformation.T # Test on sampled data whiten_samples = [] for i in range(0, len(samples)): whiten_samples.append(np.matmul(first_transformation, samples[i])) whiten_samples = np.asarray(whiten_samples) # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') plt.scatter (whiten_samples[:,0], whiten_samples[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(whiten_samples[:, 0], whiten_samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # In[12]: #now for the real sigma # calculate real sigma after first transformation real_sigma_2 = np.matmul(first_transformation, np.matmul(real_sigma, first_transformation.T)) w,v = linalg.eig(real_sigma_2) second_transformation = v.T #checking whether this transformation works properly or not real_sigma_3 = np.matmul(second_transformation, np.matmul(real_sigma_2, second_transformation.T)) print ("real sigma after transformation") print (real_sigma_3) # Now check that this transformation makes no change in the estimated sigma: print ("estimated sigma after transformation") estimated_sigma_3 = np.matmul(second_transformation, np.matmul(estimated_sigma_2, second_transformation.T)) print (estimated_sigma_3) # In[13]: #So we have to apply this new transformation to the sample data transformed_whiten = [] for i in range(0, len(whiten_samples)): transformed_whiten.append(np.matmul(second_transformation, whiten_samples[i])) transformed_whiten = np.asarray(transformed_whiten) plt.gca().set_aspect('equal') plt.scatter (transformed_whiten[:,0], transformed_whiten[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(transformed_whiten[:, 0], transformed_whiten[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # V = [1.02404447 1.0067168 ] # #### h) Find a transformation for covariance matrix of the distribution in part b., such that when applied on the data, the covariance matrix of the transformed data becomes I . Transform the data and display the distribution in the new space.
# In[14]: w,v = linalg.eig(real_sigma) normalizer = np.diag(np.sqrt(1/w)) transformation = (np.matmul(v, normalizer)).T transformed_version = [] for i in range(0, len(samples)): transformed_version.append(np.matmul(transformation, samples[i])) transformed_version = np.asarray(transformed_version) plt.gca().set_aspect('equal') plt.scatter (transformed_version[:,0], transformed_version[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(transformed_version[:, 0], transformed_version[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # #### i) Calculate the eigenvalues and eigenvectors associated with the covariance matrix of the distribution in part b. Plot the eigenvectors. What can you infer from them? # # $$ det (\begin{bmatrix} # 2-\lambda & 1 \\ # 1 & 3 - \lambda # \end{bmatrix}) = 0 \rightarrow \lambda^2 - 5\lambda + 5 =0 \rightarrow \lambda_1 = 1.38, \lambda_2= 3.62 $$ # $$ v_1= (-0.85, 0.52) , v_2 = (-0.52, -0.85) $$ # In[15]: w,v = linalg.eig(real_sigma) N =5000 samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') plt.scatter (samples[:,0], samples[:,1], s=0.2) magnitude = -2 plt.plot([2,2 + magnitude * w[0] * v[0][0]], [1, 1+magnitude * w[0] * v[1][0]], color="red") plt.plot([2,2 +magnitude * w[1] * v[0][1]], [1, 1+magnitude * w[1] *v[1][1]], color="yellow") plt.show() bins= 10 # As you can see, the eigenvectors point along the axes of the ellipsoid. The one with the bigger eigenvalue points along the semi-major axis. # #### j) Again, consider the distribution and samples you generated in part b. Construct a 2*2 matrix P , which has eigenvectors associated with $\Sigma$ as its columns. Project your generated samples to a new space using $Y_i = (X_i -M)*P $, and plot the samples. What differences do you notice? # In[16]: P = np.array([[v[0][1], v[0][0]],[v[1][1], v[1][0]]]) new_samples = [] mean = np.array([2,1]) print (np.matmul((samples[0]-mean), P)) for i in range(0, len(samples)): new_samples.append(np.matmul((samples[i]-mean), P)) new_samples = np.asarray(new_samples) plt.xlim([-10,10]) plt.ylim([-10,10]) plt.gca().set_aspect('equal') plt.scatter (new_samples[:,0], new_samples[:,1], s=0.2) plt.show() # The difference between this transformation and the previous one is that this transformation multiplies the sample vector on the **right side**, while the previous one multiplied it on the **left side**.<br> # So we can use the **eigenvector matrix** as a right-side transform, or the **transpose of the eigenvector matrix** as a left-side transform. # #### k) Find the covariance matrix associated with the projected samples in part h. Also calculate its eigenvalues and eigenvectors, and comment on the results. # In[17]: estimated_mean = np.mean(transformed_version, axis=0) print ("estimated mean \n", estimated_mean) estimated_var = 0 for i in range(0, len(transformed_version)): vec = np.array([transformed_version[i]-estimated_mean]) tmp = np.matmul(vec.T, vec) estimated_var += tmp estimated_var /= (len(samples)-1) print ("estimated sigma") print (estimated_var) # In[18]: w,v =linalg.eig(estimated_var) print (w) print (v) # Since this covariance matrix is essentially the identity matrix, its eigenvalues are 1 and its eigenvectors are (1,0) and (0,1)
s = np.random.normal(mu, sigmas[i], N) print ("mu", mu, "Sigma", sigmas[i]) sns.distplot(s, color=colors[i], bins=bins) plt.scatter (s, np.zeros(len(s)), s=0.2, color=colors[i]) plt.xlim([485, 515]) plt.show()
conditional_block
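# Editor's note: the d^2(x) expansion in part d) is easy to get wrong, so here is a
# quick numerical check (my addition, not from the original notebook) for rho = -0.99:
# read the quadratic-form coefficients off Sigma^{-1} directly and compare the closed
# form against (x - m)^T Sigma^{-1} (x - m) at an arbitrary point.
import numpy as np

rho = -0.99
m = np.array([2.0, 3.0])
sigma = np.array([[1.0, 2 * rho], [2 * rho, 4.0]])
inv = np.linalg.inv(sigma)
a, b, c, d = inv[0, 0], inv[0, 1], inv[1, 0], inv[1, 1]

print("x1^2 coeff:", a)                          # ~50.25
print("x1*x2 coeff:", b + c)                     # ~49.75 (note: b + c, not c + d)
print("x2^2 coeff:", d)                          # ~12.56
print("x1 coeff:", -4 * a - 3 * (b + c))         # ~-350.25
print("x2 coeff:", -2 * (b + c) - 6 * d)         # ~-174.87
print("constant:", 4 * a + 6 * (b + c) + 9 * d)  # ~612.56

# Sanity check at an arbitrary point
x = np.array([1.0, -2.0])
lhs = (x - m) @ inv @ (x - m)
rhs = (a * x[0]**2 + (b + c) * x[0] * x[1] + d * x[1]**2
       + (-4 * a - 3 * (b + c)) * x[0] + (-2 * (b + c) - 6 * d) * x[1]
       + 4 * a + 6 * (b + c) + 9 * d)
print(np.isclose(lhs, rhs))                      # True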
p6.py
# coding: utf-8 # ## Question 6 # ### Ali Mortazavi # ### 96131044 # # In many pattern recognition applications, Sample Generation plays an important role, where it is # necessary to generate samples which are to be normally distributed according to a given expected # vector and a covariance matrix.<br> # In this problem, you are going to do this technique yourself. You will also practice some more # complicated matrix operations as well. # In[1]: import random import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import datasets from sklearn.decomposition import PCA import pandas as pd from numpy.linalg import inv import seaborn as sns from numpy import linalg np.set_printoptions(suppress=True) # #### a) Generate samples from three normal distributions specified by the following parameters: <br> # $n=1,\ N=500,\ \mu=500,\ \sigma=1,2,3$ <br> # Plot the samples, as well as the histograms associated with each of the distributions. # Compare the results. # In[2]: mu = 500 sigmas = [1,2,3] colors = ["orange", "blue", "green"] N = 500 bins = 50 print ("HISTOGRAM and Data Points") for i in range(0, len(sigmas)): s = np.random.normal(mu, sigmas[i], N) print ("mu", mu, "Sigma", sigmas[i]) sns.distplot(s, color=colors[i], bins=bins) plt.scatter (s, np.zeros(len(s)), s=0.2, color=colors[i]) plt.xlim([485, 515]) plt.show() # # for i in range(0, len(sigmas)): # s = np.random.normal(mu, sigmas[i], N) # print ("mu", mu, "Sigma", sigmas[i]) # plt.scatter (s, np.zeros(len(s)), s=0.1, color=colors[i]) # plt.xlim([485, 515]) # plt.show() # #### b) Generate samples from a normal distribution specified by the following parameters: <br> # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 1 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 2 & 1 \\ # 1 & 3 # \end{bmatrix}$ <br> # Display the samples, as well as the associated contour plot. # In[3]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() bins= 20 (counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() samples_for_f = samples # #### c. Consider a normal distribution specified by the following parameters: # $ n = 2, N = 500, M = \begin{bmatrix} # m_1 \\ # m_2 # \end{bmatrix}, \Sigma = \begin{bmatrix} # \sigma_{11} & \sigma_{12} \\ # \sigma_{21} & \sigma_{22} # \end{bmatrix}$ <br> # Determine appropriate values for each of the unknown variables, so that the shape of the # distribution becomes: <br> # > c.1) A circle in the upper left of the Euclidean coordinate system. <br> # > c.2) A diagonal line (/ shape) in the centre<br> # > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br> # # # Display the generated samples. <br> # c.1) A circle in the upper left of the Euclidean coordinate system: # $$ m_1 < 0 , m_2 > 0, \Sigma = I $$ # In[4]: def setting_function ():
# In[5]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N) setting_function() plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.2) A diagonal line (/ shape) in the centre<br> # We have to select M to be $(0,0)$ to be in the centre. <br> # To make the distribution look like a / line, we will choose $\Sigma$ so that the eigenvector of $\Sigma$ with the largest eigenvalue points in the / direction (parallel to $(1,1)$) and its second eigenvector points along $(-1, 1)$. <br> # To make the distribution similar to a diagonal line, we will choose $\lambda_1=10, \lambda_2=1 $ # # In[6]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system # # In[7]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # #### d) Consider a random variable with # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 3 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 1 & 2\rho \\ # 2 \rho & 4 # \end{bmatrix}$ <br> # #### compute $d^2(x)$ analytically, if the parameters are: # $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$ # If $ \Sigma^{-1} = \begin{bmatrix} # a & b \\ # c & d # \end{bmatrix}$ <br> then (with $m=(2,3)$, and $b=c$ by symmetry): # $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (b+c)x_1x_2 + dx_2^2 + (-4a -3(b+c))x_1 + (-2(b+c) -6d)x_2 + (4a + 6(b+c) + 9d) $ <br> # for $\rho =-0.99$: # $$ d^2(x) \approx 50.25x_1^2 + 49.75x_1x_2 + 12.56x_2^2 - 350.25x_1 - 174.87x_2 + 612.56 $$ # In[8]: levels=[4,9,16] plt.xlim(-10,10) plt.ylim(-10,10) X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1] xy = np.vstack((X.flatten(), Y.flatten())).T Sigmas = [] vals = [-0.99, -0.5, 0.5, 0.99] for val in vals: Sigmas.append(np.array([[1,2*val],[2*val,4]])) mu = np.array([2,3]) def d_squared (xy, sigma, mu): matrix = inv(sigma) a = xy - mu t = np.matmul(a,matrix) return np.matmul(t, a.T) for sigma in Sigmas: print ("Sigma is") print (sigma) print ("Formula") t= inv(sigma) a = t[0][0] b = t[0][1] c = t[1][0] d = t[1][1] s = str(a) + "x^2 + " + str(b+c) + "xy + " + str(d) + "y^2 + " + str(-4*a-3*(b+c)) + "x + " + str(-2*(b+c)-6*d) + "y + " + str(4*a+6*(b+c)+9*d) print (s) Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu) Z = np.reshape(Z, (len(X), -1)) plt.contour(X,Y,Z, levels=[4,9,16]) plt.show() # #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results.
plt.xlim([-10, 10]) plt.ylim([-10, 10]) ax = plt.gca() ax.spines['top'].set_color('none') ax.spines['bottom'].set_position('zero') ax.spines['left'].set_position('zero') ax.spines['right'].set_color('none')
identifier_body
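# Editor's note: a small sketch (my addition, not from the original notebook)
# illustrating the point made in part j): projecting with Y_i = (x_i - M) @ P on the
# right gives exactly the same samples as applying P.T on the left, because the two
# are transposes of one another.
import numpy as np

rng = np.random.default_rng(1)
sigma = np.array([[2.0, 1.0], [1.0, 3.0]])
mean = np.array([2.0, 1.0])
X = rng.multivariate_normal(mean, sigma, size=1000)

_, P = np.linalg.eigh(sigma)           # eigenvectors of Sigma as columns of P

right = (X - mean) @ P                 # row-vector convention, as in part j)
left = (P.T @ (X - mean).T).T          # column-vector convention, as in part h)

print(np.allclose(right, left))        # True: the two conventions agree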
p6.py
# coding: utf-8 # ## Question 6 # ### Ali Mortazavi # ### 96131044 # # In many pattern recognition applications, Sample Generation plays an important role, where it is # necessary to generate samples which are to be normally distributed according to a given expected # vector and a covariance matrix.<br> # In this problem, you are going to do this technique yourself. You will also practice some more # complicated matrix operations as well. # In[1]: import random import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import datasets from sklearn.decomposition import PCA import pandas as pd from numpy.linalg import inv import seaborn as sns from numpy import linalg np.set_printoptions(suppress=True) # #### a) Generate samples from three normal distributions specified by the following parameters: <br> # $n=1,\ N=500,\ \mu=500,\ \sigma=1,2,3$ <br> # Plot the samples, as well as the histograms associated with each of the distributions. # Compare the results. # In[2]: mu = 500 sigmas = [1,2,3] colors = ["orange", "blue", "green"] N = 500 bins = 50 print ("HISTOGRAM and Data Points") for i in range(0, len(sigmas)): s = np.random.normal(mu, sigmas[i], N) print ("mu", mu, "Sigma", sigmas[i]) sns.distplot(s, color=colors[i], bins=bins) plt.scatter (s, np.zeros(len(s)), s=0.2, color=colors[i]) plt.xlim([485, 515]) plt.show() # # for i in range(0, len(sigmas)): # s = np.random.normal(mu, sigmas[i], N) # print ("mu", mu, "Sigma", sigmas[i]) # plt.scatter (s, np.zeros(len(s)), s=0.1, color=colors[i]) # plt.xlim([485, 515]) # plt.show() # #### b) Generate samples from a normal distribution specified by the following parameters: <br> # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 1 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 2 & 1 \\ # 1 & 3 # \end{bmatrix}$ <br> # Display the samples, as well as the associated contour plot. # In[3]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() bins= 20 (counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() samples_for_f = samples # #### c. Consider a normal distribution specified by the following parameters: # $ n = 2, N = 500, M = \begin{bmatrix} # m_1 \\ # m_2 # \end{bmatrix}, \Sigma = \begin{bmatrix} # \sigma_{11} & \sigma_{12} \\ # \sigma_{21} & \sigma_{22} # \end{bmatrix}$ <br> # Determine appropriate values for each of the unknown variables, so that the shape of the # distribution becomes: <br> # > c.1) A circle in the upper left of the Euclidean coordinate system. <br> # > c.2) A diagonal line (/ shape) in the centre<br> # > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br> # # # Display the generated samples.
<br> # c.1) A circle in the upper left of the Euclidean coordinate system: # $$ m_1 < 0 , m_2 > 0, \Sigma = I $$ # In[4]: def setting_function (): plt.xlim([-10, 10]) plt.ylim([-10, 10]) ax = plt.gca() ax.spines['top'].set_color('none') ax.spines['bottom'].set_position('zero') ax.spines['left'].set_position('zero') ax.spines['right'].set_color('none') # In[5]: N =5000 # we changed N, because N=500 was too small to visualize well samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N) setting_function() plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.2) A diagonal line (/ shape) in the centre<br> # We have to select M to be $(0,0)$ to be in the centre. <br> # To make the distribution look like a / line, we will choose $\Sigma$ so that the eigenvector of $\Sigma$ with the largest eigenvalue points in the / direction (parallel to $(1,1)$) and its second eigenvector points along $(-1, 1)$. <br> # To make the distribution similar to a diagonal line, we will choose $\lambda_1=10, \lambda_2=1 $ # # In[6]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system # # In[7]: N=10000 setting_function() samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N) plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() # #### d) Consider a random variable with # $ n = 2, N = 500, M = \begin{bmatrix} # 2 \\ # 3 # \end{bmatrix}, \Sigma = \begin{bmatrix} # 1 & 2\rho \\ # 2 \rho & 4 # \end{bmatrix}$ <br> # #### compute $d^2(x)$ analytically, if the parameters are: # $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$ # If $ \Sigma^{-1} = \begin{bmatrix} # a & b \\ # c & d # \end{bmatrix}$ <br> then (with $m=(2,3)$, and $b=c$ by symmetry): # $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (b+c)x_1x_2 + dx_2^2 + (-4a -3(b+c))x_1 + (-2(b+c) -6d)x_2 + (4a + 6(b+c) + 9d) $ <br> # for $\rho =-0.99$: # $$ d^2(x) \approx 50.25x_1^2 + 49.75x_1x_2 + 12.56x_2^2 - 350.25x_1 - 174.87x_2 + 612.56 $$ # In[8]: levels=[4,9,16] plt.xlim(-10,10) plt.ylim(-10,10) X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1] xy = np.vstack((X.flatten(), Y.flatten())).T Sigmas = [] vals = [-0.99, -0.5, 0.5, 0.99] for val in vals: Sigmas.append(np.array([[1,2*val],[2*val,4]])) mu = np.array([2,3]) def d_squared (xy, sigma, mu): matrix = inv(sigma) a = xy - mu t = np.matmul(a,matrix) return np.matmul(t, a.T) for sigma in Sigmas: print ("Sigma is") print (sigma) print ("Formula") t= inv(sigma) a = t[0][0] b = t[0][1] c = t[1][0] d = t[1][1] s = str(a) + "x^2 + " + str(b+c) + "xy + " + str(d) + "y^2 + " + str(-4*a-3*(b+c)) + "x + " + str(-2*(b+c)-6*d) + "y + " + str(4*a+6*(b+c)+9*d) print (s) Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu) Z = np.reshape(Z, (len(X), -1)) plt.contour(X,Y,Z, levels=[4,9,16]) plt.show() # #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results.
# In[9]: #Just repeating part b: N =5000 samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N) plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') plt.scatter (samples[:,0], samples[:,1], s=0.2) plt.show() bins= 10 plt.xlim([-7.5, 13.5]) plt.ylim([-10, 10]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # In[10]: estimated_mean = np.mean(samples, axis=0) print ("estimated mean", estimated_mean)
vec = np.array([samples[i]-estimated_mean]) tmp = np.matmul(vec.T, vec) estimated_var += tmp estimated_var /= (len(samples)-1) print ("estimated sigma") print (estimated_var) print ("real sigma:") print (np.array([[2,1],[1,3]]) ) # Comment: The estimated mean and sigma are close to the real mean and real sigma. <br> # Since these estimators are consistent and unbiased, the estimates become close to the real values as the number of samples becomes large enough. # #### g) Simultaneously diagonalise $\sigma$ and $\hat{\sigma}$ , and form a vector $ V = [\lambda_1, \lambda_2]^T $ # # In[11]: real_sigma = np.array([[2,1],[1,3]]) # First, whiten the estimated sigma: estimated_sigma = estimated_var w, v = linalg.eig(estimated_sigma) normalizer = np.diag(np.sqrt(1/w)) first_transformation = np.matmul(v, normalizer) print ("first transformation") print (first_transformation) print ("checking whether it transforms sigma to I") estimated_sigma_2 = np.matmul(first_transformation.T,( np.matmul(estimated_sigma, first_transformation))) print (estimated_sigma_2) print (v) first_transformation = first_transformation.T # Test on sampled data whiten_samples = [] for i in range(0, len(samples)): whiten_samples.append(np.matmul(first_transformation, samples[i])) whiten_samples = np.asarray(whiten_samples) # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') plt.scatter (whiten_samples[:,0], whiten_samples[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(whiten_samples[:, 0], whiten_samples[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # In[12]: #now for the real sigma # calculate real sigma after first transformation real_sigma_2 = np.matmul(first_transformation, np.matmul(real_sigma, first_transformation.T)) w,v = linalg.eig(real_sigma_2) second_transformation = v.T #checking whether this transformation works properly or not real_sigma_3 = np.matmul(second_transformation, np.matmul(real_sigma_2, second_transformation.T)) print ("real sigma after transformation") print (real_sigma_3) # Now check that this transformation makes no change in the estimated sigma: print ("estimated sigma after transformation") estimated_sigma_3 = np.matmul(second_transformation, np.matmul(estimated_sigma_2, second_transformation.T)) print (estimated_sigma_3) # In[13]: #So we have to apply this new transformation to the sample data transformed_whiten = [] for i in range(0, len(whiten_samples)): transformed_whiten.append(np.matmul(second_transformation, whiten_samples[i])) transformed_whiten = np.asarray(transformed_whiten) plt.gca().set_aspect('equal') plt.scatter (transformed_whiten[:,0], transformed_whiten[:,1], s=0.2) plt.show() bins= 10 # plt.xlim([-6,4]) # plt.ylim([-5,5]) plt.gca().set_aspect('equal') (counts, x_bins, y_bins) = np.histogram2d(transformed_whiten[:, 0], transformed_whiten[:, 1], bins=bins) plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]]) plt.show() # V = [1.02404447 1.0067168 ] # #### h) Find a transformation for covariance matrix of the distribution in part b., such that when applied on the data, the covariance matrix of the transformed data becomes I . Transform the data and display the distribution in the new space.
# #### h) Find a transformation for the covariance matrix of the distribution in part b., such that when applied to the data, the covariance matrix of the transformed data becomes I. Transform the data and display the distribution in the new space.

# In[14]:

w, v = linalg.eig(real_sigma)
normalizer = np.diag(np.sqrt(1 / w))
transformation = (np.matmul(v, normalizer)).T

transformed_version = []
for i in range(0, len(samples)):
    transformed_version.append(np.matmul(transformation, samples[i]))
transformed_version = np.asarray(transformed_version)

plt.gca().set_aspect('equal')
plt.scatter(transformed_version[:, 0], transformed_version[:, 1], s=0.2)
plt.show()

bins = 10
plt.gca().set_aspect('equal')
(counts, x_bins, y_bins) = np.histogram2d(transformed_version[:, 0], transformed_version[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()

# #### i) Calculate the eigenvalues and eigenvectors associated with the covariance matrix of the distribution in part b. Plot the eigenvectors. What can you infer from them?

# $$ \det \begin{bmatrix} 2-\lambda & 1 \\ 1 & 3-\lambda \end{bmatrix} = 0 \rightarrow \lambda^2 - 5\lambda + 5 = 0 \rightarrow \lambda_1 = 1.38,\ \lambda_2 = 3.62 $$
# Solving $(\Sigma - \lambda I)v = 0$ for each eigenvalue and normalizing gives
# $$ v_1 = (-0.85, 0.52),\ v_2 = (-0.52, -0.85) $$

# In[15]:

w, v = linalg.eig(real_sigma)
N = 5000
samples = np.random.multivariate_normal(np.array([2, 1]), np.array([[2, 1], [1, 3]]), N)
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
plt.scatter(samples[:, 0], samples[:, 1], s=0.2)
magnitude = -2
plt.plot([2, 2 + magnitude * w[0] * v[0][0]], [1, 1 + magnitude * w[0] * v[1][0]], color="red")
plt.plot([2, 2 + magnitude * w[1] * v[0][1]], [1, 1 + magnitude * w[1] * v[1][1]], color="yellow")
plt.show()

# As you can see, the eigenvectors point along the axes of the ellipsoid. The one with the bigger eigenvalue points along the semi-major axis.

# #### j) Again, consider the distribution and samples you generated in part b. Construct a 2x2 matrix P which has the eigenvectors associated with $\Sigma$ as its columns. Project your generated samples to a new space using $Y_i = (X_i - M) P$, and plot the samples. What differences do you notice?

# In[16]:

P = np.array([[v[0][1], v[0][0]], [v[1][1], v[1][0]]])
new_samples = []
mean = np.array([2, 1])
print(np.matmul((samples[0] - mean), P))
for i in range(0, len(samples)):
    new_samples.append(np.matmul((samples[i] - mean), P))
new_samples = np.asarray(new_samples)

plt.xlim([-10, 10])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
plt.scatter(new_samples[:, 0], new_samples[:, 1], s=0.2)
plt.show()

# The difference between this transformation and the previous one is that this one multiplies on the **right side** of the vector, while the previous one multiplied on the **left side**. <br>
# So we can use the **eigenvector matrix** as a right-side transform, or the **transpose of the eigenvector matrix** as a left-side transform.

# #### k) Find the covariance matrix associated with the projected samples in part h. Also calculate its eigenvalues and eigenvectors, and comment on the results.

# In[17]:

estimated_mean = np.mean(transformed_version, axis=0)
print("estimated mean \n", estimated_mean)
estimated_var = 0
for i in range(0, len(transformed_version)):
    vec = np.array([transformed_version[i] - estimated_mean])
    tmp = np.matmul(vec.T, vec)
    estimated_var += tmp
estimated_var /= (len(samples) - 1)
print("estimated sigma")
print(estimated_var)


# In[18]:

w, v = linalg.eig(estimated_var)
print(w)
print(v)

# Since this covariance matrix is essentially the identity matrix, its eigenvalues are 1 and its eigenvectors are (1,0) and (0,1).
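# Sanity check on the hand-rolled estimator used in parts f and k: numpy's
# built-in np.cov computes the same unbiased (N - 1 divisor) estimate. A
# minimal self-contained sketch:

import numpy as np

samples_chk = np.random.multivariate_normal([2, 1], [[2, 1], [1, 3]], 5000)
mean_chk = samples_chk.mean(axis=0)
manual = np.zeros((2, 2))
for x in samples_chk:
    d = (x - mean_chk).reshape(-1, 1)
    manual += d @ d.T
manual /= len(samples_chk) - 1
print(np.allclose(manual, np.cov(samples_chk, rowvar=False)))  # -> True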
print ("real mean [2,1]") estimated_var = 0 for i in range(0, len(samples)):
twitch.py
#!/usr/bin/env python3

import os
import sys
import json
import dateutil.parser
import datetime
import pytz
import logging
import aiohttp
import discord

logger = logging.getLogger('twitch')

stream_state = {}


async def twitch_request(client_id, endpoint, in_id):
    params = {'id': in_id}
    headers = {'Client-ID': client_id}
    try:
        async with aiohttp.get('https://api.twitch.tv/helix/%s' % endpoint, headers=headers, params=params) as r:
            if r.status == 200:
                js = await r.json()
                return js['data'][0]
            else:
                logger.error('Twitch HTTP badness: %s', r.status)
                logger.error(await r.text())
    except:
        logger.error('Twitch badness')
    return None


async def get_user(client_id, user_id):
    return await twitch_request(client_id, 'users', user_id)


async def get_game_title(client_id, game_id, user_id):
    game = await twitch_request(client_id, 'games', game_id)
    if game:
        return game['name']
    else:
        # Fall back to the v5 (Kraken) API, which returns the game name on the stream object.
        headers = {'Client-ID': client_id, 'Accept': 'application/vnd.twitchtv.v5+json'}
        try:
            async with aiohttp.get('https://api.twitch.tv/kraken/streams/%s' % user_id, headers=headers) as r:
                if r.status == 200:
                    js = await r.json()
                    game_name = js['stream']['game']
                    if game_name == "":
                        game_name = 'Playing some videogames'
                    return game_name
                else:
                    logger.error('Twitch Kraken HTTP badness: %s', r.status)
                    logger.error(await r.text())
        except:
            logger.error('Twitch Kraken badness')
    return 'Playing some videogames'


async def lookup_users(config, user_list):
    headers = {'Client-ID': config['twitch']['client-id'], 'Content-Type': 'application/json'}
    async with aiohttp.get('https://api.twitch.tv/helix/users', headers=headers, params=user_list) as r:
        if r.status == 200:
            user_json = await r.json()
            return user_json['data']
        else:
            logger.error("Username look-up fail %d" % r.status)
            logger.error(await r.text())
    return []


def ibzytime(hour, minute):
    # Express the local time as a signed offset from 23:00 local ("EIT").
    # Hours before 11:00 are treated as belonging to the next day.
    negative = False
    hm = (hour * 60) + minute
    if hour < 11:
        hm += (24 * 60)
    hm -= (23 * 60)
    if hm < 0:
        hm *= -1
        negative = True
    return '%s%02d:%02d EIT' % ('-' if negative else '+', hm // 60, hm % 60)


async def parse_streams(client, config, server, stream_data):
    users_announced = []
    try:
        client_id = config['twitch']['client-id']
        for live_data in stream_data['data']:
            logger.debug(live_data)
            if ('type' in live_data) and (live_data['type'] != 'live'):
                logger.info('Ignoring VOD')
                continue
            # Was seeing some issues where the first notification had no language set, and then
            # the second was sent with a different ID. Looks like Twitch may have fixed this, so
            # commenting to prevent notifications being ignored.
            # if ('language' in live_data) and (live_data['language'] == ''):
            #     logger.info("Ignoring live data with no language set")
            #     continue
            start_time = dateutil.parser.parse(live_data['started_at'])
            ourtz = pytz.timezone('Europe/London')
            time_now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
            start_time_local = start_time.astimezone(ourtz)
            time_diff = time_now - start_time
            logger.info("Started %d:%02d Delay %s" % (start_time_local.hour, start_time_local.minute, time_diff))
            user_id = live_data['user_id']
            user = await get_user(client_id, user_id)
            last_stream = None
            if user_id in stream_state:
                last_stream = stream_state[user_id]
            game_title = await get_game_title(client_id, live_data['game_id'], user_id)
            user_url = "https://twitch.tv/%s" % user['login']
            embed = discord.Embed(title=user_url, url=user_url, color=2207743)
            embed.set_author(name="I say, %s has gone live!" % user['display_name'], url=user_url)
            embed.set_thumbnail(url=user['profile_image_url'])
            embed.add_field(name=game_title, value=live_data['title'], inline=False)
            if user['login'] == 'evenibzy':
                embed.set_footer(text=("Stream started %s" % ibzytime(start_time_local.hour, start_time_local.minute)))
            else:
                embed.set_footer(text=("Stream started %d:%02d" % (start_time_local.hour, start_time_local.minute)))
            channels = config['discord']['channels']
            channel_name = channels['_default_']
            delete = True
            if user['login'] in channels:
                channel_name = channels[user['login']]
                delete = False
            logger.debug("channel_name=%s" % channel_name)
            channel = discord.utils.get(server.channels, name=channel_name)
            try:
                new_stream = {}
                new_stream['message'] = await client.send_message(channel, embed=embed)
                stream_state[user_id] = new_stream
                users_announced.append(user['display_name'])
                logger.debug('Sent %s:%s' % (user['login'], new_stream['message'].id))
                if last_stream and delete:
                    logger.debug('Deleting %s:%s' % (user['login'], last_stream['message'].id))
                    try:
                        await client.delete_message(last_stream['message'])
                    except:
                        logger.exception('Delete failed')
                elif not delete:
                    logger.debug('No delete on this stream')
                else:
                    logger.debug('No prior stream to delete')
            except:
                logger.exception('Discord badness')
                logger.error("channel_name=%s" % channel_name)
                logger.error("embed=%s" % embed.to_dict())
    except:
        logger.exception('Stream badness')
    return users_announced


async def sub_unsub_user(config, user_logins, subscribe, users=None):
    headers = {'Client-ID': config['twitch']['client-id'], 'Content-Type': 'application/json'}
    # Post data for the (un)subscription request.
    sub_data = {
        "hub.mode": "subscribe" if subscribe else "unsubscribe",
        "hub.lease_seconds": 864000,
        "hub.secret": config['twitch']['secret']
    }
    if not users:
        users = await lookup_users(config, list(map(lambda u: ('login', u), user_logins)))
    user_names = []
    user_ids = []
    # Send an (un)subscription request for each user.
    for user in users:
        logger.info('%s: %s' % (user['display_name'], user['id']))
        sub_data['hub.callback'] = "%s?lb3.server=%s&lb3.user_id=%s" % (config['twitch']['webhook_uri'], config['discord']['server'], user['id'])
        sub_data['hub.topic'] = "https://api.twitch.tv/helix/streams?user_id=%s" % user['id']
        async with aiohttp.post('https://api.twitch.tv/helix/webhooks/hub', headers=headers, data=json.dumps(sub_data)) as r:
            if r.status == 202:
                logger.info('%s OK' % sub_data['hub.topic'])
                user_names.append(user['display_name'])
                user_ids.append(user['id'])
            else:
                logger.error('Went wrong %d' % r.status)
                logger.error(await r.text())
    if len(user_ids) > 0:
        if subscribe:
            return ("Right-ho, I've asked those lovely chaps at Twitch to tell me when **%s** goes live" % ' '.join(user_names), user_ids)
        else:
            return ("Right-ho, I've asked those lovely chaps at Twitch to stop telling me about **%s**" % ' '.join(user_names), user_ids)
    return ("Sorry, old-bean. I couldn't find anyone.", None)


async def sub_user(config, user_logins):
    return await sub_unsub_user(config, user_logins, True)


async def unsub_user(config, user_logins):
    return await sub_unsub_user(config, user_logins, False)


async def announce_user(client, config, server, user_logins):
    response = "Nothing doing, I'm afraid"
    logger.info(user_logins)
    headers = {'Client-ID': config['twitch']['client-id'], 'Content-Type': 'application/json'}
    params = list(map(lambda u: ('user_login', u), user_logins))
    async with aiohttp.get('https://api.twitch.tv/helix/streams', headers=headers, params=params) as r:
        if r.status == 200:
            streams_json = await r.json()
            users = await parse_streams(client, config, server, streams_json)
            if len(users) > 0:
                response = "Announced %s" % (' '.join(users))
    return (response, None)


async def get_subs(config):
    headers = {'Authorization': 'Bearer %s' % config['twitch']['app-token']}
    get_more = True
    user_ids = []
    params = None
    while get_more:
        get_more = False
        async with aiohttp.get('https://api.twitch.tv/helix/webhooks/subscriptions', headers=headers, params=params) as r:
            if r.status == 200:
                subs = await r.json()
                logger.debug("All subs: %s" % subs)
                server_str = 'lb3.server=%s' % config['discord']['server']
                server_subs = list(filter(lambda sub: server_str in sub['callback'], subs['data']))
                logger.debug("Server subs: %s" % server_subs)
                new_ids = list(map(lambda sub: ('id', sub['topic'].split('=')[1]), server_subs))
                logger.debug("User IDs: %s" % new_ids)
                user_ids.extend(new_ids)
                if ('pagination' in subs) and ('cursor' in subs['pagination']):
                    params = [('after', subs['pagination']['cursor'])]
                    get_more = True
            else:
                logger.error('Twitch webhook HTTP badness: %s', r.status)
                logger.error(await r.text())
    if len(user_ids) > 0:
        return await lookup_users(config, user_ids)
    return None


async def list_subs(client, config):
    users = await get_subs(config)
    if users:
        logger.debug("Users: %s" % users)
        user_names = list(map(lambda user: user['display_name'], users))
        return ("Twitch will tell me about **%s**" % ' '.join(user_names), None)
    else:
        return ("Sorry, I can't seem to find my notes", None)


async def resub(client, config):
    users = await get_subs(config)
    if users:
        logger.debug("Users: %s" % users)
        return await sub_unsub_user(config, None, True, users)
    else:
        return ("I appear to have lost my users", None)
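# A few worked values for ibzytime() above, which reports local times as a
# signed offset from 23:00 local. This is a sketch of its behaviour that
# assumes the function above is in scope; it is not part of the bot's
# runtime path:
#
#   ibzytime(23, 0)   ->  '+00:00 EIT'   (23:00 is the reference point)
#   ibzytime(22, 30)  ->  '-00:30 EIT'   (half an hour before the reference)
#   ibzytime(10, 0)   ->  '+11:00 EIT'   (hours before 11:00 wrap to the next day)
for h, m in [(23, 0), (22, 30), (10, 0)]:
    print(ibzytime(h, m))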
msg.go
/*
Package msg creates a simple messaging package with flexible formatting
that can be used to write to multiple sinks. Here is an example use.

    import (
        "jlinoff/utils/msg"
        "io"
        "os"
    )

    // My package logger.
    var log *msg.Object

    // Initialize it at startup.
    func init() {
        // Only write to stdout.
        w := []io.Writer{os.Stdout}

        // The name of my package. It is only used if %pkg is specified in
        // the format string.
        n := "MyPackage"

        // Format string. Note that I could use %utc instead of %time to
        // get UTC time.
        f := `%pkg %(-27)time %(-7)type %file %line - %msg`

        // Time format string, only used if %time or %utc are specified in
        // the format string.
        t := `2006-01-02 15:04:05.000 MST`

        // Create the message object.
        // Note that this is the same as
        // msg.NewMsg("MyPackage", "", "", []io.Writer{})
        // because I used the defaults.
        l, e := msg.NewMsg(n, f, t, w)
        if e != nil {
            panic(e)
        }
        log = l
    }

    func test() {
        log.Debug("message of type %v", "debug")
        log.Info("info message")
        log.Warn("warning")

        // Now print messages to stdout and to a log file while in this scope.
        fp, _ := os.Create("log.txt")
        log.Writers = append(log.Writers, fp)

        // This stuff will go to stdout and the log file.
        log.Info("both")
        log.ErrNoExit("bad stuff happened but i can recover!")
        log.Printf("this is random text that is not formatted\n")

        // Clean up by removing the file from the writers and then
        // closing it.
        log.Writers = log.Writers[:len(log.Writers)-1]
        fp.Close()
    }
*/
package msg

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path"
	"runtime"
	"strconv"
	"time"
)

// Interface defines logger functions.
type Interface interface {
	Debug(f string, a ...interface{})
	Info(f string, a ...interface{})
	Warn(f string, a ...interface{})
	Err(f string, a ...interface{})
	ErrNoExit(f string, a ...interface{})
	DebugWithLevel(l int, f string, a ...interface{})
	InfoWithLevel(l int, f string, a ...interface{})
	WarnWithLevel(l int, f string, a ...interface{})
	ErrWithLevel(l int, f string, a ...interface{})
	ErrNoExitWithLevel(l int, f string, a ...interface{})
	Printf(f string, a ...interface{})
}

// Object defines the logger.
type Object struct {
	// Name is the package name. It is accessed in the format string by %pkg.
	Name string

	// DebugEnabled enables debug messages if true.
	// It is true by default.
	DebugEnabled bool

	// InfoEnabled enables info messages if true.
	// It is true by default.
	InfoEnabled bool

	// WarningEnabled enables warning messages if true.
	// It is true by default.
	WarningEnabled bool

	// ErrorExitCode is the exit code used by the Err function.
	// The default is 1.
	ErrorExitCode int

	// Writers for the message output.
	// If no writers are specified, messages go to os.Stdout.
	Writers []io.Writer

	// TimeFormat is the format of the prefix timestamp.
	// See time.Format for details.
	// The default format is: "2006-01-02 15:04:05.000 MST"
	TimeFormat string

	// Format is the template for the output. It has the following specifiers.
	//
	//    %file  is the caller file name
	//    %func  is the function name
	//    %line  is the line number
	//    %msg   is the actual message
	//    %pkg   is the package name
	//    %time  is the time format in the current locale
	//    %utc   is the time format in the UTC locale
	//    %type  is the msg type: DEBUG, INFO, WARNING, ERROR
	//    %%     is a single % character
	//
	// You can explicitly format each field by specifying the formatting
	// options in parentheses.
	//
	//    %(-28)time
	//
	// Any other text is left verbatim.
	//
	// The default format is:
	//    `%(-27)time %(-7)type %file %line - %msg`
	Format string

	// outputFormat created by NewMsg and used to generate a message.
	outputFormat string

	// outputFlds created by NewMsg and used to specify the fields.
	outputFlds []string
}

// NewMsg makes a message object.
//    n - package name
//    f - format string, set to "" to get the default
//    t - time stamp format, set to "" to get the default
//    w - the list of writers; if empty, all messages go to stdout
func NewMsg(n string, f string, t string, w []io.Writer) (obj *Object, err error) {
	obj = new(Object)
	obj.Name = n
	obj.DebugEnabled = true
	obj.InfoEnabled = true
	obj.WarningEnabled = true
	obj.ErrorExitCode = 1
	if len(w) == 0 {
		obj.Writers = append(obj.Writers, os.Stdout)
	} else {
		obj.Writers = w
	}

	// Set the time format. If it is empty, set the default.
	if t == "" {
		obj.TimeFormat = "2006-01-02 15:04:05.000 MST"
	} else {
		obj.TimeFormat = t
	}

	// Set the format. If it is empty, use the default.
	if f == "" {
		obj.Format = `%(-27)time %(-7)type %file %line - %msg`
	} else {
		obj.Format = f
	}

	// Parse the format.
	ofmt, oflds, err := ParseFormatString(obj.Format)
	obj.outputFormat = ofmt
	obj.outputFlds = oflds
	return
}

/*
Debug prints a debug message obtaining the caller's filename, function and
line number. It automatically appends a new line.

Example:
    msg.Debug("%v = %v", key, value)
*/
func (o Object) Debug(f string, a ...interface{}) {
	if o.DebugEnabled {
		o.PrintMsg("DEBUG", 2, f, a...)
	}
}

/*
DebugWithLevel prints a debug message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same as
Debug(). It automatically appends a new line.

Example:
    msg.DebugWithLevel(2, "%v = %v", key, value)
*/
func (o Object) DebugWithLevel(l int, f string, a ...interface{}) {
	if o.DebugEnabled {
		o.PrintMsg("DEBUG", l, f, a...)
	}
}

/*
Info prints an info message obtaining the caller's filename, function and
line number. It automatically appends a new line.

Example:
    msg.Info("%v = %v", key, value)
*/
func (o Object) Info(f string, a ...interface{}) {
	if o.InfoEnabled {
		o.PrintMsg("INFO", 2, f, a...)
	}
}

/*
InfoWithLevel prints an info message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same as
Info(). It automatically appends a new line.

Example:
    msg.InfoWithLevel(2, "%v = %v", key, value)
*/
func (o Object) InfoWithLevel(l int, f string, a ...interface{}) {
	if o.InfoEnabled {
		o.PrintMsg("INFO", l, f, a...)
	}
}

/*
Warn prints a warning message obtaining the caller's filename, function and
line number. It automatically appends a new line.

Example:
    msg.Warn("%v = %v", key, value)
*/
func (o Object) Warn(f string, a ...interface{}) {
	if o.WarningEnabled {
		o.PrintMsg("WARNING", 2, f, a...)
	}
}

/*
WarnWithLevel prints a warning message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same as
Warn(). It automatically appends a new line.

Example:
    msg.WarnWithLevel(2, "%v = %v", key, value)
*/
func (o Object) WarnWithLevel(l int, f string, a ...interface{}) {
	if o.WarningEnabled {
		o.PrintMsg("WARNING", l, f, a...)
	}
}

/*
Err prints an error message obtaining the caller's filename, function and
line number and exits. It cannot be disabled. It automatically appends a
new line.

Example:
    msg.Err("%v = %v", key, value)
*/
func (o Object) Err(f string, a ...interface{}) {
	o.PrintMsg("ERROR", 2, f, a...)
	os.Exit(o.ErrorExitCode)
}

/*
ErrWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l", then exits. l=2 is the
same as Err(). It cannot be disabled. It automatically appends a new line.

Example:
    msg.ErrWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrWithLevel(l int, f string, a ...interface{}) {
	o.PrintMsg("ERROR", l, f, a...)
	os.Exit(o.ErrorExitCode)
}

/*
ErrNoExit prints an error message obtaining the caller's filename, function
and line number. It does not exit and cannot be disabled. It automatically
appends a new line.

Example:
    msg.ErrNoExit("%v = %v", key, value)
*/
func (o Object) ErrNoExit(f string, a ...interface{}) {
	o.PrintMsg("ERROR", 2, f, a...)
}

/*
ErrNoExitWithLevel prints an error message obtaining the filename, function
and line number from the caller specified by level "l". l=2 is the same as
ErrNoExit(). It does not exit and cannot be disabled. It automatically
appends a new line.

Example:
    msg.ErrNoExitWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrNoExitWithLevel(l int, f string, a ...interface{}) {
	o.PrintMsg("ERROR", l, f, a...)
}

/*
Printf prints directly to the log without the format template. It allows
you to insert arbitrary text. Unlike the other functions it does not
automatically append a new line.

Example:
    msg.Printf("this is just random text that goes to all writers\n")
*/
func (o Object) Printf(f string, a ...interface{}) {
	// Create the formatted output string.
	s := fmt.Sprintf(f, a...)

	// Output it for each writer. Use Fprint so any '%' characters in the
	// already-formatted string are not re-interpreted as verbs.
	for _, w := range o.Writers {
		fmt.Fprint(w, s)
	}
}

/*
PrintMsg is the basis of all message printers except Printf. It prints the
formatted messages and normally would not be called directly.

    t - is the type, normally one of DEBUG, INFO, WARNING or ERROR
    l - is the caller level: 0 is this function, 1 is the caller,
        2 is the caller's caller and so on
    f - format string
    a - argument list
*/
func (o Object) PrintMsg(t string, l int, f string, a ...interface{}) {
	pc, fname, lineno, _ := runtime.Caller(l)
	fct := runtime.FuncForPC(pc).Name()
	fname = path.Base(fname[0 : len(fname)-3]) // strip off ".go"

	// The variables map for the format string.
	m := map[string]string{
		"file": fname,
		"func": fct,
		"line": strconv.Itoa(lineno),
		"msg":  fmt.Sprintf(f, a...),
		"pkg":  o.Name,
		"time": time.Now().Truncate(time.Millisecond).Format(o.TimeFormat),
		"utc":  time.Now().UTC().Truncate(time.Millisecond).Format(o.TimeFormat),
		"type": t,
	}

	// Collect the field values.
	var flds []interface{}
	for _, k := range o.outputFlds {
		if v, ok := m[k]; ok {
			flds = append(flds, v)
		} else {
			// This is, essentially, an assert. It should never happen.
			fmt.Fprintf(os.Stderr, "ERROR: unexpected condition, invalid specification id '%v'\n", k)
			os.Exit(1)
		}
	}

	// Create the formatted output string.
	s := fmt.Sprintf(o.outputFormat, flds...) + "\n"

	// Output it for each writer. Use Fprint so any '%' characters in the
	// already-formatted string are not re-interpreted as verbs.
	for _, w := range o.Writers {
		_, err := fmt.Fprint(w, s)
		if err != nil {
			fmt.Fprintf(os.Stderr, `
FATAL: fmt.Fprint() failed for writer %v
   call stack = %v %v %v
   output     = %v
   error      = %v
`, w, m["file"], m["func"], m["line"], s[:len(s)-1], err)
			os.Exit(1)
		}
	}
}

/*
ParseFormatString transforms a format template to a format string and the
list of fields to print in each message. It is meant to be used internally
by NewMsg().

Here is an example transformation:

    input = "MYSTUFF %(-27)time %(-7)type %file %line - %msg"

    // TRANSFORM

    ofmt = "MYSTUFF %-27v %-7v %v %v - %v"
    oids = ["time", "type", "file", "line", "msg"]
*/
func ParseFormatString(input string) (ofmt string, oids []string, err error) {
	ofmtb := []byte{}
	valid := []string{"file", "func", "line", "msg", "pkg", "time", "type", "utc"}
	// Characters that may legally extend an identifier; used to reject
	// near-misses like %linex.
	ics := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-$")

	// Define the parse states.
	//    normal - capture each byte
	//    spec   - capture a specification of the form %<id> or %(<fmt>)<id>
	type state int
	const (
		normal state = iota
		spec
	)

	s := normal
	ib := []byte(input)
	for i := 0; i < len(ib); i++ {
		b := ib[i]
		switch s {
		case normal:
			// Normal state: this is all of the stuff in the template that
			// is not part of a specification.
			if b == '%' {
				s = spec
			} else {
				ofmtb = append(ofmtb, b)
			}
		case spec:
			s = normal // after parsing the spec, go back to the normal state

			// Specification state: parse specifications of the form
			//    %(<fmt>)<id>
			//    %<id>
			//    %%
			beg := i - 1
			if b == '%' {
				// %% emits a single literal % character. It must stay
				// escaped as %% in the output format string.
				ofmtb = append(ofmtb, '%', '%')
				continue
			}
			if b == '(' {
				// This is a format specification. Capture it.
				// If ')' is not found, report an error.
				j := i // ib[j] == '('
				for ; i < len(ib) && ib[i] != ')'; i++ {
				}
				if i >= len(ib) {
					err = fmt.Errorf("missing ')' for '%v'", string(ib[beg:]))
					return
				}
				ofmtb = append(ofmtb, '%')
				ofmtb = append(ofmtb, ib[j+1:i]...)
				ofmtb = append(ofmtb, 'v')
				i++ // point past the ')'
			} else {
				ofmtb = append(ofmtb, "%v"...)
			}

			// Now parse out the id.
			id := ""
			for _, v := range valid {
				ba := []byte(v)
				if bytes.HasPrefix(ib[i:], ba) {
					// We MAY have a match.
					// For example '%line' matches but '%linex' does not.
					i += len(ba)
					if i < len(ib) {
						bs := []byte{ib[i]}
						if bytes.Contains(ics, bs) {
							ofmt = string(ofmtb)
							ba = append(ba, ib[i])
							err = fmt.Errorf("unrecognized specification id '%v'", string(ba))
							return
						}
					}
					id = string(ba)
					i--
					break
				}
			}
			if id == "" {
				ofmt = string(ofmtb)
				err = fmt.Errorf("specification syntax error '%v'", string(ib[beg:]))
				return
			}
			oids = append(oids, id)
		}
	}
	ofmt = string(ofmtb)
	return
}
msg.go
/* Package msg creates a simple messaging package with flexible formatting that can be used to write to multiple sinks. Here is an example use. import ( "jlinoff/utils/msg" "io" "os" ) // My package logger. var log *msg.Object // Initialize it at startup. func init() { // Only write to stdout. w := []io.Writer{os.Stdout} // The name of my package. It is only used %pkg is specified in the // format string. n := "MyPackage" // Format string. Note that i could use %utc instead of %time to get // UTC time. f := `%pkg %(-27)time %(-7)type %file %line - %msg` // Time format string, only used if %time or %utc are specified in the // the format string. t := `2006-01-02 15:05:05.000 MST` // Create the message object. // Note that this is the same as this because I used the defaults. // msg.NewMsg("MyPackage", "", "", []io.Writer{}) l, e := msg.NewMsg(n, f, t, w) if e != nil { panic(e) } log = l } func test() { log.Debug("message of type %v", "debug") log.Info("info message") log.Warn("warning") // Now print messages to stdout and to a log while in this scope. fp, _ := os.Create("log.txt") log.Writers = append(log.Writers, fp) // This stuff will go to stdout and the log file. log.Info("both") log.ErrNoExit("bad stuff happened but i can recover!") log.Printf("this is random text that is not formatted\n") // Clean up by removing the file from the writers and then // closing it. log.Writers = log.Writers[:len(log.Writers)-1] fp.Close() } */ package msg import ( "bytes" "fmt" "io" "os" "path" "runtime" "strconv" "time" ) // Interface defines logger functions. type Interface interface { Debug(f string, a ...interface{}) Info(f string, a ...interface{}) Warn(f string, a ...interface{}) Err(f string, a ...interface{}) ErrNoExit(f string, a ...interface{}) DebugWithLevel(l int, f string, a ...interface{}) InfoWithLevel(l int, f string, a ...interface{}) WarnWithLevel(l int, f string, a ...interface{}) ErrWithLevel(l int, f string, a ...interface{}) ErrNoExitWithLevel(l int, f string, a ...interface{}) Printf(f string, a ...interface{}) } // Object defines the logger. type Object struct { // Name is the package name. It is accessed in the format string by %pkg. Name string // DebugEnabled enables debug messages if true. // It is true by default. DebugEnabled bool // InfoEnabled enables info messages if true. // It is true by default. InfoEnabled bool // WarningEnabled enables warning messages if true. // It is true by default. WarningEnabled bool // ErrorExitCode is the exit code to use for the Error function. // The default is 1. ErrorExitCode int // Writers for the message output. // If no writers are specified, messages go to os.Stdout. Writers []io.Writer // TimeFormat is the format of the prefix timestamp. // See time.Format for details. // The default format is: "2006-01-02 15:05:05.000 MST" TimeFormat string // Format is the template for the output. It has the following specifiers. // // %file is the caller file name // %func is the function name // %line is the line number // %msg is the actual message // %pkg is the package name // %time is the time format in the current locale // %utc is the time format in the UTC locale // %type is the msg type: DEBUG, INFO, WARNING, ERROR // %% is a single % character // // You can explicitly format each field by specifying the formatting // options in parentheses. // // %(-28)time // // Any other text is left verbatim. // // The default format is. 
// `%(-27)time %(-7)type %file %line - %msg` Format string // outputFormat created by NewMsg and used to generate a message. outputFormat string // outputFlds created by NewMsg and used to specify the fields. outputFlds []string } // NewMsg makes a message object. // n - package name // f - format string, set to "" to get the default. // t - time stamp format, set to "" to get the default // w - the list of writers, if empty all messages go to stdout func NewMsg(n string, f string, t string, w []io.Writer) (obj *Object, err error) { obj = new(Object) obj.Name = n obj.DebugEnabled = true obj.InfoEnabled = true obj.WarningEnabled = true obj.ErrorExitCode = 1 if len(w) == 0 { obj.Writers = append(obj.Writers, os.Stdout) } else { obj.Writers = w } // Set the time format. If it is empty, set the default. if t == "" { obj.TimeFormat = "2006-01-02 15:05:05.000 MST" } else { obj.TimeFormat = t } // Set the format. If it is empty use the default. if f == "" { obj.Format = `%(-27)time %(-7)type %file %line - %msg` } else { obj.Format = f } // Parse the format. ofmt, oflds, err := ParseFormatString(obj.Format) obj.outputFormat = ofmt obj.outputFlds = oflds return } /* Debug prints a debug message obtaining the callers filename, function and line number. It automatically appends a new line. Example: msg.Debug("%v = %v", key, value) */ func (o Object) Debug(f string, a ...interface{}) { if o.DebugEnabled { o.PrintMsg("DEBUG", 2, f, a...) } } /* DebugWithLevel prints a debug message obtaining the filename, function and line number from the caller specified by level "l". l=2 is the same as Debug(). It automatically appends a new line. Example: msg.DebugWithLevel(2, "%v = %v", key, value) */ func (o Object) DebugWithLevel(l int, f string, a ...interface{}) { if o.DebugEnabled { o.PrintMsg("DEBUG", l, f, a...) } } /* Info prints an info message obtaining the callers filename, function and line number. It automatically appends a new line. Example: msg.Info("%v = %v", key, value) */ func (o Object) Info(f string, a ...interface{}) { if o.InfoEnabled { o.PrintMsg("INFO", 2, f, a...) } } /* InfoWithLevel prints an info message obtaining the filename, function and line number from the caller specified by level "l". l=2 is the same as Debug(). It automatically appends a new line. Example: msg.InfoWithLevel(2, "%v = %v", key, value) */ func (o Object) InfoWithLevel(l int, f string, a ...interface{}) { if o.InfoEnabled { o.PrintMsg("INFO", l, f, a...) } } /* Warn prints a warning message obtaining the callers filename, function and line number. It automatically appends a new line. Example: msg.Warn("%v = %v", key, value) */ func (o Object) Warn(f string, a ...interface{}) { if o.WarningEnabled { o.PrintMsg("WARNING", 2, f, a...) } } /* WarnWithLevel prints a warning message obtaining the filename, function and line number from the caller specified by level "l". l=2 is the same as Debug(). It automatically appends a new line. Example: msg.WarnWithLevel(2, "%v = %v", key, value) */ func (o Object) WarnWithLevel(l int, f string, a ...interface{}) { if o.WarningEnabled { o.PrintMsg("WARNING", 2, f, a...) } } /* Err prints an error message obtaining the callers filename, function and line number and exits. It cannot be disabled. It automatically appends a new line. Example: msg.Err("%v = %v", key, value) */ func (o Object) Err(f string, a ...interface{}) { o.PrintMsg("ERROR", 2, f, a...) 
	os.Exit(o.ErrorExitCode)
}

/*
ErrWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l" and then exits. l=2 is
the same as Err(). It cannot be disabled. It automatically appends a newline.

Example:

	msg.ErrWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrWithLevel(l int, f string, a ...interface{}) {
	o.PrintMsg("ERROR", l, f, a...)
	os.Exit(o.ErrorExitCode)
}

/*
ErrNoExit prints an error message obtaining the caller's filename, function
and line number. It does not exit and cannot be disabled. It automatically
appends a newline.

Example:

	msg.ErrNoExit("%v = %v", key, value)
*/
func (o Object) ErrNoExit(f string, a ...interface{}) {
	o.PrintMsg("ERROR", 2, f, a...)
}

/*
ErrNoExitWithLevel prints an error message obtaining the filename, function
and line number from the caller specified by level "l". l=2 is the same as
ErrNoExit(). It does not exit and cannot be disabled. It automatically
appends a newline.

Example:

	msg.ErrNoExitWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrNoExitWithLevel(l int, f string, a ...interface{}) {
	o.PrintMsg("ERROR", l, f, a...)
}

/*
Printf prints directly to the log without applying the output format template.
It allows you to insert arbitrary text. Unlike the other functions it does
not automatically append a newline.

Example:

	msg.Printf("this is just random text that goes to all writers\n")
*/
func (o Object) Printf(f string, a ...interface{}) {
	// Create the formatted output string.
	s := fmt.Sprintf(f, a...)

	// Output it for each writer. Use Fprint (not Fprintf) because s is
	// already formatted and may contain '%' characters.
	for _, w := range o.Writers {
		fmt.Fprint(w, s)
	}
}

/*
PrintMsg is the basis of all message printers except Printf. It prints the
formatted messages and normally would not be called directly.

	t - the type, normally one of DEBUG, INFO, WARNING or ERROR
	l - the caller level: 0 is this function, 1 is the caller, 2 is the
	    caller's caller and so on
	f - format string
	a - argument list
*/
func (o Object) PrintMsg(t string, l int, f string, a ...interface{}) {
	pc, fname, lineno, _ := runtime.Caller(l)
	fct := runtime.FuncForPC(pc).Name()
	fname = path.Base(fname[0 : len(fname)-3]) // strip off ".go"

	// The variables map for the format string.
	m := map[string]string{
		"file": fname,
		"func": fct,
		"line": strconv.Itoa(lineno),
		"msg":  fmt.Sprintf(f, a...),
		"pkg":  o.Name,
		"time": time.Now().Truncate(time.Millisecond).Format(o.TimeFormat),
		"utc":  time.Now().UTC().Truncate(time.Millisecond).Format(o.TimeFormat),
		"type": t,
	}

	// Collect the field values.
	var flds []interface{}
	for _, k := range o.outputFlds {
		if v, ok := m[k]; ok {
			flds = append(flds, v)
		} else {
			// This is, essentially, an assert. It should never happen.
			fmt.Fprintf(os.Stderr, "ERROR: unexpected condition, invalid specification id '%v'\n", k)
			os.Exit(1)
		}
	}

	// Create the formatted output string.
	s := fmt.Sprintf(o.outputFormat, flds...) + "\n"

	// Output it for each writer.
	for _, w := range o.Writers {
		_, err := fmt.Fprint(w, s)
		if err != nil {
			fmt.Fprintf(os.Stderr, `
FATAL: fmt.Fprint() failed for writer %v
   call stack = %v %v %v
   output     = %v
   error      = %v
`,
				w, m["file"], m["func"], m["line"], s[:len(s)-1], err)
			os.Exit(1)
		}
	}
}

/*
ParseFormatString transforms a format template into a fmt format string and
the list of fields to print in each message. It is meant to be used
internally by NewMsg().

Here is an example transformation:

	input = "MYSTUFF %(-27)time %(-7)type %file %line - %msg"

	// TRANSFORM

	ofmt = "MYSTUFF %-27v %-7v %v %v - %v"
	oids = ["time", "type", "file", "line", "msg"]
*/
func ParseFormatString(input string) (ofmt string, oids []string, err error) {
	ofmtb := []byte{}
	valid := []string{"file", "func", "line", "msg", "pkg", "time", "type", "utc"}
	ics := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-$")

	// Define the parse states.
	//    normal - capture each byte
	//    spec   - capture a specification of the form %<id> or %(<fmt>)<id>
	type state int
	const (
		normal state = iota
		spec
	)

	s := normal
	ib := []byte(input)
	for i := 0; i < len(ib); i++ {
		b := ib[i]
		switch s {
		case normal:
			// normal state, this is all of the stuff in the
			// template that is not part of a specification.
			if b == '%' {
				s = spec
			} else {
				ofmtb = append(ofmtb, b)
			}
		case spec:
			s = normal // after parsing the spec go back to the normal state

			// %% emits a single literal '%'. It must be escaped as "%%"
			// in the generated fmt format string.
			if b == '%' {
				ofmtb = append(ofmtb, '%', '%')
				continue
			}

			// specification state, parse specifications of the form:
			//    %(<fmt>)<id>
			//    %<id>
			beg := i - 1
			if b == '(' {
				// This is a format specification. Capture it.
				// If ')' is not found, report an error.
				j := i // ib[j] == '('
				for ; i < len(ib) && ib[i] != ')'; i++ {
				}
				if i >= len(ib) {
					err = fmt.Errorf("missing ')' for '%v'", string(ib[beg:]))
					return
				}
				ofmtb = append(ofmtb, '%')
				ofmtb = append(ofmtb, ib[j+1:i]...)
				ofmtb = append(ofmtb, 'v')
				i++ // point past the ')'
			} else {
				ofmtb = append(ofmtb, "%v"...)
			}

			// Now parse out the id.
			id := ""
			for _, v := range valid {
				ba := []byte(v)
				if bytes.HasPrefix(ib[i:], ba) {
					// We MAY have a match.
					// For example '%line' matches but '%linex' does not.
					i += len(ba)
					if i < len(ib) {
						bs := []byte{ib[i]}
						if bytes.Contains(ics, bs) {
							ofmt = string(ofmtb)
							ba = append(ba, ib[i])
							err = fmt.Errorf("unrecognized specification id '%v'", string(ba))
							return
						}
					}
					id = string(ba)
					i--
					break
				}
			}
			if id == "" {
				ofmt = string(ofmtb)
				err = fmt.Errorf("specification syntax error '%v'", string(ib[beg:]))
				return
			}
			oids = append(oids, id)
		}
	}
	ofmt = string(ofmtb)
	return
}
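The caller-level argument to the WithLevel variants is easiest to see with a small wrapper. The sketch below is illustrative and not part of the package: the helper name logKV and the program are assumptions; the import path is taken from the package comment above.

package main

import (
	"io"
	"os"

	"jlinoff/utils/msg"
)

// logKV is a hypothetical helper that logs a key/value pair. It adds one
// stack frame between the real caller and PrintMsg, so it passes caller
// level 3 instead of the usual 2; %file, %func and %line then report
// logKV's caller rather than logKV itself.
func logKV(log *msg.Object, key string, value interface{}) {
	log.InfoWithLevel(3, "%v = %v", key, value)
}

func main() {
	log, err := msg.NewMsg("Example", "", "", []io.Writer{os.Stdout})
	if err != nil {
		panic(err)
	}
	logKV(log, "answer", 42) // the reported location is this line, not logKV
}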
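For reference, a minimal sketch that exercises ParseFormatString directly; the expected output shown in the comments follows from the parsing rules above.

package main

import (
	"fmt"

	"jlinoff/utils/msg"
)

func main() {
	// Transform the default-style template into a fmt format string plus
	// the ordered list of field ids.
	ofmt, oids, err := msg.ParseFormatString(`%pkg %(-27)time %(-7)type %file %line - %msg`)
	if err != nil {
		panic(err)
	}
	fmt.Println(ofmt) // %v %-27v %-7v %v %v - %v
	fmt.Println(oids) // [pkg time type file line msg]
}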
examples.js
/*****************.REDUCE (callback function) **********************/
// The reduce() method applies a function against an accumulator and each
// element in the array (from left to right) to reduce it to a single value.
var numbers = [2,4,5,7,8,9];

var sum = numbers.reduce(function(runningTotal, num) {
  return runningTotal + num;
});

console.log(sum); // sum is 35

// var sum = [0, 1, 2, 3].reduce(function (a, b) { return a + b; }, 0);
// sum is 6

/*****************FOREACH (callback function)**********************/
// The forEach() method executes a provided function once for each array element.
var numbers = [2,4,5,7,8,9];

numbers.forEach(function(num) {
  console.log(num);
});

/*****************.MAP (callback function)*********************/
// The map() method creates a new array with the results of calling a provided
// function on every element in this array.
var numbers = [1, 4, 9];
var roots = numbers.map(Math.sqrt);
// roots is now [1, 2, 3]
// numbers is still [1, 4, 9]

var numbers = [2,4,5,7,8,9];

var squares = numbers.map(function (num) {
  return num * num;
});

console.log(squares); // outputs [4, 16, 25, 49, 64, 81]

/*****************RECURSION**********************/
function factorial(n) {
  if (n === 0) {
    return 1;
  }
  // This is it! Recursion!!
  return n * factorial(n - 1);
}
console.log(factorial(10));

/*****************FOR LOOPS**********************/
// print all odd numbers between 300 and 333
for(var i = 300; i <= 333; i++) {
  if(i % 2 !== 0) {
    console.log(i);
  }
}

// fizz buzz question
// print 1 to 100; for numbers divisible by 3 print "Fizz", divisible by 5
// print "Buzz", and divisible by both print "FizzBuzz"
for (var i = 1; i <= 100; i++) {
  var words = "";
  if (i % 3 === 0 && i % 5 === 0) {
    words += "FizzBuzz";
  } else if (i % 3 === 0) {
    words += "Fizz";
  } else if (i % 5 === 0) {
    words += "Buzz";
  } else {
    words += i;
  }
  console.log(words);
}

/*****************WHILE LOOPS**********************/
// PRINT EVEN NUMBERS BETWEEN 10 AND 40
var count = 10;
while(count <= 40){
  console.log(count);
  count += 2;
}

// PRINT ALL ODD NUMBERS BETWEEN 300 AND 333
var count = 300;
while(count <= 333){
  if(count % 2 !== 0){ // odd numbers leave a remainder of 1
    console.log(count);
  }
  count += 1;
}

/*****************FUNCTIONS**********************/
// Replace - with _ within a string
function kebabToSnake(str) {
  // replace all '-' with '_'
  var newStr = str.replace(/-/g, "_");
  // return the new string
  return newStr;
}

function wordsToSentence(words) {
  // words is an array of strings
  // return a string that is all of the words concatenated together
  // spaces need to be between each word
  // example: ['Hello', 'world!'] -> 'Hello world!'
  return words.join(' ');
}

function combineNames(firstName, lastName) {
  // return firstName and lastName combined as one string, separated by a space.
  // 'Lambda', 'School' -> 'Lambda School'
  var fullName = firstName + ' ' + lastName;
  return fullName;
}

function getGreeting(name) {
  // Take the name string and concatenate other strings onto it so it takes
  // the following form: 'Sam' -> 'Hello Sam!'
  var greeting = 'Hello ' + name + '!';
  return greeting;
}

function getRectangleArea(length, width) {
  // return the area of the rectangle by using length and width
  var area = length * width;
  return area;
}

function getRectangularPrismVolume(length, width, height) {
  // return the volume of the 3D rectangular prism given the length, width, and height
  var volume = length * width * height;
  return volume;
}

/*****************REVERSE A STRING **********************/
// This splits the string into characters, reverses them and joins them again.
function FirstReverse(str) {
  var splitString = str.split("");
  var reverseArray = splitString.reverse();
  var joinArray = reverseArray.join("");
  console.log(joinArray);
}

// keep this function call here
FirstReverse("hello");

/*****************CHECK NUMBER IS PRIME **********************/
function primeChecker(num) {
  if (num < 2) return false; // 0 and 1 are not prime
  var limit = Math.floor(Math.sqrt(num));
  for (var i = 2; i <= limit; i++) {
    if (num % i === 0) {
      return false;
    }
  }
  return true;
}
console.log(primeChecker(15));

/*****************FUNCTIONS + IF ELSE**********************/
function fizzBuzz(num) {
  // if num is divisible by 3 return 'fizz'
  // if num is divisible by 5 return 'buzz'
  // if num is divisible by 3 & 5 return 'fizzbuzz'
  // otherwise return num
  if(num % 3 === 0 && num % 5 === 0) {
    return 'fizzbuzz';
  } else if(num % 3 === 0) {
    return 'fizz';
  } else if(num % 5 === 0) {
    return 'buzz';
  } else {
    return num;
  }
}

// converts RGB numbers to a hex colour string
function colorOf(r, g, b){
  var red = r.toString(16);
  if (red.length === 1) red = "0" + red;
  var green = g.toString(16);
  if (green.length === 1) green = "0" + green;
  var blue = b.toString(16);
  if (blue.length === 1) blue = "0" + blue;
  return "#" + red + green + blue;
}
colorOf(255, 0, 0); // outputs '#ff0000'

function isPrime(num) {
  // return true if num is prime, otherwise return false
  // hint: a prime number is only evenly divisible by itself and 1
  // hint2: you can solve this using a for loop
  // note: 0 and 1 are NOT considered prime numbers
  for(var i = 2; i < num; i++) {
    if(num % i === 0) {
      return false;
    }
  }
  return num > 1;
}

// alternative version with an early return for num < 2
function isPrime(num) {
  if(num < 2) return false;
  for (var i = 2; i < num; i++) {
    if(num % i === 0) return false;
  }
  return true;
}

function personAge(yearOfBirth) {
  var age = 2017 - yearOfBirth;
  if(age >= 16) {
    console.log("you can drive");
  } else {
    console.log("you are not old enough to drive");
  }
}
personAge(2011);

// find the middle of a string of odd or even length
function mid(str) {
  var middle = Math.floor(str.length / 2);
  if(str.length % 2 === 0){
    return str[middle - 1] + str[middle];
  } else {
    return str[middle];
  }
}
console.log(mid('computer'));

// access array items
function uefaEuro2016(teams, scores){
  if(scores[0] === scores[1]) {
    return 'At match ' + teams[0] + ' - ' + teams[1] + ', teams played draw.';
  } else if(scores[0] > scores[1]) {
    return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[0] + ' won!';
  } else {
    return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[1] + ' won!';
  }
}
uefaEuro2016(['Germany', 'Ukraine'], [2, 0]);

/*****************FUNCTIONS + FOR LOOP**********************/
function averageTestScore(testScores) {
  // testScores is an array. Iterate over testScores and compute the average.
  // return the average
  var average = 0;
  for (var i = 0; i < testScores.length; i++) {
    average = average + testScores[i];
  }
  average = average / testScores.length;
  return average;
}

// LOOP OVER ARRAY, PUSH ELEMENTS INTO EMPTY ARRAYS
function pickIt(arr){
  var odd = [], even = [];
  for (var i = 0; i < arr.length; i++){
    if (arr[i] % 2 === 1) {
      odd.push(arr[i]);
    } else {
      even.push(arr[i]);
    }
  }
  return [odd, even];
}
pickIt([6, 9]); // pickIt expects an array

// LOOP IN A LOOP :O
console.log('******************************');
var twoDimensionArray = [
  ['Amul', 'Amanda', 'Harriet'],
  ['Anisa', 'Mauro', 'Matty'],
  ['Jonnie', 'Emily', 'Nick']
];

for( var i = 0; i < twoDimensionArray.length; i++ ){
  var namesArr = twoDimensionArray[i];
  for( var x = 0; x < namesArr.length; x++ ){
    console.log( twoDimensionArray[i][x] + ' is in array ' + i +
      ' in the twoDimensional array and has index ' + x + ' in its own array');
  }
}

// function with a for loop, if statement, break and continue
function grabDoll(dolls){
  var bag = [];
  for(var i = 0; i < dolls.length; i++) {
    if(dolls[i] === "Hello Kitty" || dolls[i] === "Barbie doll")
      bag.push(dolls[i]);
    else
      continue;
    if(bag.length === 3)
      break;
  }
  return bag;
}

/*****************FUNCTIONS + IF ELSE + FOR **********************/
function largestNumber(numbers) {
  // numbers is an array of integers
  // return the largest integer
  // start from the first element so arrays of all-negative numbers work too
  var largestNum = numbers[0];
  for (var i = 1; i < numbers.length; i++) {
    if (numbers[i] > largestNum) {
      largestNum = numbers[i];
    }
  }
  return largestNum;
}

/*****************FUNCTIONS + OBJECTS**********************/
function makeCat(name, age) {
  // create a new object with a name property with the value set to the name argument
  // add an age property to the object with the value set to the age argument
  // add a method called meow that returns the string 'Meow!'
  // return the object
  var newCat = {
    name: name,
    age: age,
    meow: function () {
      return 'Meow!';
    }
  };
  return newCat;
}
function newUser(name, email, password) {
  // create a new object with properties matching the arguments passed in.
  // return the new object
  var newObj = {
    name: name,
    email: email,
    password: password
  };
  return newObj;
}

var languages = {
  english: 'Welcome',
  czech: 'Vitejte',
  danish: 'Velkomst',
  dutch: 'Welkom',
  estonian: 'Tere tulemast',
  finnish: 'Tervetuloa',
  flemish: 'Welgekomen',
  french: 'Bienvenue',
  german: 'Willkommen',
  irish: 'Failte',
  italian: 'Benvenuto',
  latvian: 'Gaidits',
  lithuanian: 'Laukiamas',
  polish: 'Witamy',
  spanish: 'Bienvenido',
  swedish: 'Valkommen',
  welsh: 'Croeso'
};

function greet(language) {
  for(var key in languages){
    if(key === language){
      return languages[key];
    }
  }
  return "Welcome"; // default when the language is not in the table
}

/*****************FUNCTIONS INSIDE FUNCTIONS**********************/
function addCalculateDiscountPriceMethod(storeItem) {
  // add a method to the storeItem object called 'calculateDiscountPrice'
  // this method multiplies the storeItem's 'price' and 'discountPercentage'
  // to get the discount, then subtracts the discount from the price and
  // returns the discounted price
  // example:
  //   price -> 20
  //   discountPercentage -> .2
  //   discountPrice = 20 - (20 * .2)
  storeItem.calculateDiscountPrice = function () {
    var discount = storeItem.price * storeItem.discountPercentage;
    storeItem.discountPrice = storeItem.price - discount;
    return storeItem.discountPrice;
  };
  return storeItem;
}

/*****************FUNCTIONS PIZZA ORDER**********************/
// pizza order functions
var orderCount = 0;

function takeOrder(topping, crustType) {
  console.log('Order: ' + crustType + ' pizza topped with ' + topping);
  orderCount = orderCount + 1;
}

function getSubTotal(itemCount) {
  return itemCount * 8.97;
}

function getTax() {
  return getSubTotal(orderCount) * 0.20;
}

function getTotal() {
  return getSubTotal(orderCount) + getTax();
}

takeOrder('bacon', 'thin crust');
takeOrder('pepperoni', 'hand-tossed');
takeOrder('sausage', 'deep dish');
takeOrder('chicken', 'thin crust');
takeOrder('meatballs', 'hand-tossed');
takeOrder('anchovies', 'thin crust');

console.log('Sub-Total: £' + getSubTotal(orderCount).toFixed(2));
console.log('Tax: £' + getTax().toFixed(2));
console.log('Total: £' + getTotal().toFixed(2));

/* -----------------CALCULATE BILL -------------------------
   The function takes a bill, a VAT percentage and a tip percentage and
   returns the total bill, formatted as a currency string.
   ---------------------------------------------------------- */
function calculateBill(bill, VATrate, TipRate) {
  var tip = (TipRate / 100) * bill;
  var vat = (VATrate / 100) * bill;
  return '£' + (bill + tip + vat).toFixed(2);
}

var result = calculateBill(88, 18, 15);
console.log('The total bill in this example is: ' + result);
examples.js
/*****************.REDUCE (callback function) **********************/ //reduce() method applies a function against an accumulator and each element in the array (from left to right) to reduce //it to a single value. var numbers = [2,4,5,7,8,9]; var sum = numbers.reduce(function(runningTotal, num) { return runningTotal += num; }); console.log(sum); // sum is 35 // var sum = [0, 1, 2, 3].reduce(function (a, b) { return a + b; }, 0); // sum is 6 /*****************FOREACH (callback function)**********************/ //forEach() method executes a provided function once for each array element var numbers = [2,4,5,7,8,9]; numbers.forEach(function(num) { console.log(num); }); /*****************.MAP (callback function)*********************/ //The map() method creates a new array with the results of calling a provided function on every element in this array. var numbers = [1, 4, 9]; var roots = numbers.map(Math.sqrt); // roots is now [1, 2, 3] // numbers is still [1, 4, 9] var numbers = [2,4,5,7,8,9]; var squares = numbers.map(function (num) { return num * num; }); console.log(squares); // outputs [4, 16, 25, 49, 64, 81] /*****************RECURSION**********************/ function
(n) { if (n === 0) { return 1; } // This is it! Recursion!! return n * factorial(n - 1); } console.log(factorial(10)); /*****************FOR LOOPS**********************/ // print all odd numbers between 300 and 333 for(var i = 300; i <= 333; i++) { if(i % 2 !== 0) { console.log(i); } } // fizz buzz question // print 1 to 100 any num / by 3 print fizz, / 5 buzz and both fizzbuzz for (var i = 1; i <= 100; i++) { var words = ""; if (i % 3 === 0 && i % 5 === 0) { words += "FizzBuzz"; } else if (i % 3 === 0) { words += "Fizz"; } else if(i % 5 === 0) { words += "Buzz"; } else { words += i; } console.log(words); } /*****************WHILE LOOPS**********************/ // PRINT EVEN NUMBERS BETWEEN 10 AND 40 var count = 10; while(count <= 40){ console.log(count); count+=2; } // PRINT ALL ODD NUMBERS BETWEEN 300 333 var count = 300; while(count <=333){ if(count % 2 === 0){ console.log(count); } count += 1; } /*****************FUNCTIONS**********************/ // Replace - with _ within a string function kebabToSnake(str) { //replace all '-' with '_' var newStr = str.replace(/-/g , "_"); //return str return newStr; } function wordsToSentence(words) { //words is an array of strings //return a string that is all of the words concatenated together //spaces need to be between each word //example: ['Hello', 'world!'] -> 'Hello world!' return words.join(' '); } function combineNames(firstName, lastName) { //return firstName and lastName combined as one string and separated by a space. //'Lambda', 'School' -> 'Lambda School' var fullName = firstName + ' ' + lastName; return fullName; } function getGreeting(name) { //Take the name string and concatenate other strings onto it so it takes the following form: //'Sam' -> 'Hello Sam!' var greeting = 'Hello ' + name + '!'; return greeting; } function getRectangleArea(length, width) { //return the area of the rectangle by using length and width var area = length * width; return area; } function getRectangularPrismVolume(length, width, height) { //return the volume of the 3D rectangular prism given the length, width, and height var prismed = length * width * height; return prismed; } /*****************REVERSE A STRING **********************/ // This splits the string, reverses it and joins it again function FirstReverse(str) { // code goes here var splitString = str.split(""); var reverseArray = splitString.reverse(); var joinArray = reverseArray.join(""); console.log(joinArray); } // keep this function call here FirstReverse("hello"); /*****************CHECK NUMBER IS PRIME **********************/ function primeChecker (num) { if (num < 1) return false; var newNum = Math.floor(Math.sqrt(num)); for (var i = 2; i <= newNum; i++) { if (num % i === 0) { return false; } } return true; } console.log(primeChecker(15)); /*****************FUNCTIONS + IF ELSE**********************/ function fizzBuzz(num) { //if num is divisible by 3 return 'fizz' //if num is divisible by 5 return 'buzz' //if num is divisible by 3 & 5 return 'fizzbuzz' //otherwise return num if(num % 3 === 0 && num % 5 === 0) { return 'fizzbuzz'; } else if(num % 3 === 0) { return 'fizz'; }else if(num % 5 === 0) { return 'buzz'; } else { return num; } } function colorOf(r,g,b){ var red = r.toString(16); if (red.length === 1) red = "0" + red; var green = g.toString(16); if (green.length === 1) green = "0" + green; var blue = b.toString(16); if (blue.length === 1) blue = "0" + blue; return "#" + red + green + blue; } colorOf(255,0,0); // outputs '#ff0000' // turns numbers to a hex code string function isPrime(num) 
{ //return true if num is prime. //otherwise return false //hint: a prime number is only evenly divisible by itself and 1 //hint2: you can solve this using a for loop //note: 0 and 1 are NOT considered prime numbers for(var i = 2; i < num; i++) { if(num % i === 0) { return false; } } return num > 1; } function isPrime(num) { //return true if num is prime. //otherwise return false //hint: a prime number is only evenly divisible by itself and 1 //hint2: you can solve this using a for loop //note: 0 and 1 are NOT considered prime numbers if(num < 2) return false; for (var i = 2; i < num; i++) { if(num%i==0) return false; } return true; } function personAge(yearOfBirth) { var age = 2017 - yearOfBirth; if(age >= 16) { console.log("you can drive"); } else { console.log("you are not old enough to drive"); } } personAge(2011); //find the middle of a random odd or even string function mid(str) { var middle = Math.floor(str.length / 2); if(str.length % 2 === 0){ return str[middle - 1] + str[middle]; } else { return str[middle]; } } console.log(mid('computer')); //access array items function uefaEuro2016(teams, scores){ // your code... if(scores[0] === scores[1]) { return 'At match ' + teams[0] + ' - ' + teams[1] + ', teams played draw.'; } else if(scores[0] > scores[1]) { return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[0] + ' won!'; } else if(scores[1] > scores[0]) { return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[1] + ' won!'; } } uefaEuro2016(['Germany', 'Ukraine'], [2, 0]); /*****************FUNCTIONS + FOR LOOP**********************/ function averageTestScore(testScores) { //testScores is an array. Iterate over testScores and compute the average. //return the average var average = 0; for (var i = 0; i < testScores.length; i++) { average = average + testScores[i]; } average = average / testScores.length; return average; } // LOOP OVER ARRAY, PUSH ELEMENT INTO EMPTY ARRAY function pickIt(arr){ var odd=[],even=[]; for (var i = 0; i < arr.length; i++){ if (arr[i] % 2 === 1) { odd.push(arr[i]); } else { even.push(arr[i]); } } return [odd,even]; } pickIt([6, 9]); // LOOP IN A LOOP :O console.log('******************************'); var twoDimensionArray = [ ['Amul', 'Amanda', 'Harriet'], ['Anisa', 'Mauro', 'Matty'], ['Jonnie', 'Emily', 'Nick'] ]; for( var i = 0; i < twoDimensionArray.length; i++ ){ var namesArr = twoDimensionArray[i]; for( var x = 0; x < namesArr.length; x++ ){ console.log( twoDimensionArray[i][x] + ' is in array ' + i + ' in the twoDimensional array and has index ' + x + ' in its own array'); } } //function with a for loop, if statement, break and continue function grabDoll(dolls){ var bag=[]; for(var i = 0; i < dolls.length; i++) { if(dolls[i] === "Hello Kitty" || dolls[i] === "Barbie doll") bag.push(dolls[i]); else continue; if(bag.length === 3) break; } return bag; } /*****************FUNCTIONS + IF ELSE + FOR **********************/ function largestNumber(numbers) { //numbers is an array of integers //return the largest integer var largestNum = numbers[0]; for (var i = 0; i < numbers.length; i++) { if (numbers[i] > largestNum) { largestNum = numbers[i]; } } return largestNum; } /*****************FUNCTIONS + OBJECTS**********************/ function makeCat(name, age) { //create a new object with a name property with the value set to the name argument //add an age property to the object with the value set to the age argument //add a method called meow that returns the string 'Meow!' 
//return the object var newCat = { name: name, age: age, meow:function () { return 'Meow!'; } }; return newCat; } function newUser(name, email, password) { //create a new object with properties matching the arguments passed in. //return the new object var newObj = { name: name, email: email, password: password }; return newObj; } var languages = { english: 'Welcome', czech: 'Vitejte', danish: 'Velkomst', dutch: 'Welkom', estonian: 'Tere tulemast', finnish: 'Tervetuloa', flemish: 'Welgekomen', french: 'Bienvenue', german: 'Willkommen', irish: 'Failte', italian: 'Benvenuto', latvian: 'Gaidits', lithuanian: 'Laukiamas', polish: 'Witamy', spanish: 'Bienvenido', swedish: 'Valkommen', welsh: 'Croeso' } function greet(language) { for(var key in languages){ if(key === language){ return languages[key]; } } return "Welcome"; } /*****************FUNCTIONS INSIDE FUNCTIONS**********************/ function addCalculateDiscountPriceMethod(storeItem) { //add a method to the storeItem object called 'calculateDiscountPrice' //this method should multiply the storeItem's 'price' and 'discountPercentage' to get the discount //the method then subtracts the discount from the price and returns the discounted price //example: //price -> 20 //discountPercentage -> .2 //discountPrice = 20 - (20 * .2) storeItem.calculateDiscountPrice = function () { var discount = storeItem.price * storeItem.discountPercentage; storeItem.discountPrice = storeItem.price - discount; return storeItem.discountPrice; }; return storeItem; } /*****************FUNCTIONS PIZZA ORDER**********************/ // pizza order function var orderCount=0; function takeOrder(topping, crustType) { console.log('Order: ' + crustType + ' pizza topped with ' + topping); orderCount=orderCount + 1; } function getSubTotal(itemCount) { return itemCount*8.97; } function getTax() { return getSubTotal(orderCount) * 0.20; } function getTotal() { return getSubTotal(orderCount) + getTax(); } takeOrder('bacon', 'thin crust'); takeOrder('pepperoni', 'hand-tossed'); takeOrder('sausage', 'deep dish'); takeOrder('chicken', 'thin crust'); takeOrder('meatballs', 'hand-tossed'); takeOrder('anchovies', 'thin crust'); console.log('Sub-Total: £' + getSubTotal(orderCount).toFixed(2)); console.log('Tax: £' + getTax().toFixed(2)); console.log('Total: £' + getTotal().toFixed(2)); /* -----------------CALCULATE BILL ------------------------- The function takes a bill, a VAT percent and a tip percent and returns the total bill, formatted as a currency string. ---------------------------------------------------------- */ function calculateBill (bill, VATrate, TipRate) { var tip = (TipRate / 100) * bill; var vat = (VATrate / 100) * bill; return '£' + (bill + tip + vat).toFixed(2); } var result = calculateBill(88, 18, 15); console.log('The total bill in this example is: ' + result);
factorial
identifier_name
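The recursive factorial above works because every call shrinks n toward the n === 0 base case; without that guard the recursion never terminates. A minimal Go sketch of the same pattern, assuming nothing beyond the standard library:

package main

import "fmt"

// factorial recurses until the n <= 0 base case stops it.
func factorial(n int) int {
	if n <= 0 {
		return 1
	}
	return n * factorial(n-1)
}

func main() {
	fmt.Println(factorial(10)) // 3628800
}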
examples.js
/*****************.REDUCE (callback function) **********************/ //reduce() method applies a function against an accumulator and each element in the array (from left to right) to reduce //it to a single value. var numbers = [2,4,5,7,8,9]; var sum = numbers.reduce(function(runningTotal, num) { return runningTotal + num; }); console.log(sum); // sum is 35 // var sum = [0, 1, 2, 3].reduce(function (a, b) { return a + b; }, 0); // sum is 6 /*****************FOREACH (callback function)**********************/ //forEach() method executes a provided function once for each array element var numbers = [2,4,5,7,8,9]; numbers.forEach(function(num) { console.log(num); }); /*****************.MAP (callback function)*********************/ //The map() method creates a new array with the results of calling a provided function on every element in this array. var numbers = [1, 4, 9]; var roots = numbers.map(Math.sqrt); // roots is now [1, 2, 3] // numbers is still [1, 4, 9] var numbers = [2,4,5,7,8,9]; var squares = numbers.map(function (num) { return num * num; }); console.log(squares); // outputs [4, 16, 25, 49, 64, 81] /*****************RECURSION**********************/ function factorial(n) { if (n === 0) { return 1; } // This is it! Recursion!! return n * factorial(n - 1); } console.log(factorial(10)); /*****************FOR LOOPS**********************/ // print all odd numbers between 300 and 333 for(var i = 300; i <= 333; i++) { if(i % 2 !== 0) { console.log(i); } } // fizz buzz question // print 1 to 100 any num / by 3 print fizz, / 5 buzz and both fizzbuzz for (var i = 1; i <= 100; i++) { var words = ""; if (i % 3 === 0 && i % 5 === 0) { words += "FizzBuzz"; } else if (i % 3 === 0) { words += "Fizz"; } else if(i % 5 === 0) { words += "Buzz"; } else { words += i; } console.log(words); } /*****************WHILE LOOPS**********************/ // PRINT EVEN NUMBERS BETWEEN 10 AND 40 var count = 10; while(count <= 40){ console.log(count); count+=2; } // PRINT ALL ODD NUMBERS BETWEEN 300 AND 333 var count = 300; while(count <= 333){ if(count % 2 !== 0){ console.log(count); } count += 1; } /*****************FUNCTIONS**********************/ // Replace - with _ within a string function kebabToSnake(str) { //replace all '-' with '_' var newStr = str.replace(/-/g , "_"); //return str return newStr; } function wordsToSentence(words) { //words is an array of strings //return a string that is all of the words concatenated together //spaces need to be between each word //example: ['Hello', 'world!'] -> 'Hello world!' return words.join(' '); } function combineNames(firstName, lastName) { //return firstName and lastName combined as one string and separated by a space. //'Lambda', 'School' -> 'Lambda School' var fullName = firstName + ' ' + lastName; return fullName; } function getGreeting(name) { //Take the name string and concatenate other strings onto it so it takes the following form: //'Sam' -> 'Hello Sam!' 
var greeting = 'Hello ' + name + '!'; return greeting; } function getRectangleArea(length, width) { //return the area of the rectangle by using length and width var area = length * width; return area; } function getRectangularPrismVolume(length, width, height) { //return the volume of the 3D rectangular prism given the length, width, and height var prismed = length * width * height; return prismed; } /*****************REVERSE A STRING **********************/ // This splits the string, reverses it and joins it again function FirstReverse(str) { // code goes here var splitString = str.split(""); var reverseArray = splitString.reverse(); var joinArray = reverseArray.join(""); console.log(joinArray); } // keep this function call here FirstReverse("hello"); /*****************CHECK NUMBER IS PRIME **********************/ function primeChecker (num) { if (num < 2) return false; // 0 and 1 are not prime var newNum = Math.floor(Math.sqrt(num)); for (var i = 2; i <= newNum; i++) { if (num % i === 0) { return false; } } return true; } console.log(primeChecker(15)); /*****************FUNCTIONS + IF ELSE**********************/ function fizzBuzz(num) { //if num is divisible by 3 return 'fizz' //if num is divisible by 5 return 'buzz' //if num is divisible by 3 & 5 return 'fizzbuzz' //otherwise return num if(num % 3 === 0 && num % 5 === 0) { return 'fizzbuzz'; } else if(num % 3 === 0) { return 'fizz'; } else if(num % 5 === 0) { return 'buzz'; } else { return num; } } // turns numbers to a hex code string function colorOf(r,g,b){ var red = r.toString(16); if (red.length === 1) red = "0" + red; var green = g.toString(16); if (green.length === 1) green = "0" + green; var blue = b.toString(16); if (blue.length === 1) blue = "0" + blue; return "#" + red + green + blue; } colorOf(255,0,0); // outputs '#ff0000' function isPrime(num) { //return true if num is prime. //otherwise return false //hint: a prime number is only evenly divisible by itself and 1 //hint2: you can solve this using a for loop //note: 0 and 1 are NOT considered prime numbers for(var i = 2; i < num; i++) { if(num % i === 0)
} return num > 1; } function isPrime(num) { //return true if num is prime. //otherwise return false //hint: a prime number is only evenly divisible by itself and 1 //hint2: you can solve this using a for loop //note: 0 and 1 are NOT considered prime numbers if(num < 2) return false; for (var i = 2; i < num; i++) { if(num%i==0) return false; } return true; } function personAge(yearOfBirth) { var age = 2017 - yearOfBirth; if(age >= 16) { console.log("you can drive"); } else { console.log("you are not old enough to drive"); } } personAge(2011); //find the middle of a random odd or even string function mid(str) { var middle = Math.floor(str.length / 2); if(str.length % 2 === 0){ return str[middle - 1] + str[middle]; } else { return str[middle]; } } console.log(mid('computer')); //access array items function uefaEuro2016(teams, scores){ // your code... if(scores[0] === scores[1]) { return 'At match ' + teams[0] + ' - ' + teams[1] + ', teams played draw.'; } else if(scores[0] > scores[1]) { return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[0] + ' won!'; } else if(scores[1] > scores[0]) { return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[1] + ' won!'; } } uefaEuro2016(['Germany', 'Ukraine'], [2, 0]); /*****************FUNCTIONS + FOR LOOP**********************/ function averageTestScore(testScores) { //testScores is an array. Iterate over testScores and compute the average. //return the average var average = 0; for (var i = 0; i < testScores.length; i++) { average = average + testScores[i]; } average = average / testScores.length; return average; } // LOOP OVER ARRAY, PUSH ELEMENT INTO EMPTY ARRAY function pickIt(arr){ var odd=[],even=[]; for (var i = 0; i < arr.length; i++){ if (arr[i] % 2 === 1) { odd.push(arr[i]); } else { even.push(arr[i]); } } return [odd,even]; } pickIt([6, 9]); // LOOP IN A LOOP :O console.log('******************************'); var twoDimensionArray = [ ['Amul', 'Amanda', 'Harriet'], ['Anisa', 'Mauro', 'Matty'], ['Jonnie', 'Emily', 'Nick'] ]; for( var i = 0; i < twoDimensionArray.length; i++ ){ var namesArr = twoDimensionArray[i]; for( var x = 0; x < namesArr.length; x++ ){ console.log( twoDimensionArray[i][x] + ' is in array ' + i + ' in the twoDimensional array and has index ' + x + ' in its own array'); } } //function with a for loop, if statement, break and continue function grabDoll(dolls){ var bag=[]; for(var i = 0; i < dolls.length; i++) { if(dolls[i] === "Hello Kitty" || dolls[i] === "Barbie doll") bag.push(dolls[i]); else continue; if(bag.length === 3) break; } return bag; } /*****************FUNCTIONS + IF ELSE + FOR **********************/ function largestNumber(numbers) { //numbers is an array of integers //return the largest integer var largestNum = numbers[0]; for (var i = 0; i < numbers.length; i++) { if (numbers[i] > largestNum) { largestNum = numbers[i]; } } return largestNum; } /*****************FUNCTIONS + OBJECTS**********************/ function makeCat(name, age) { //create a new object with a name property with the value set to the name argument //add an age property to the object with the value set to the age argument //add a method called meow that returns the string 'Meow!' //return the object var newCat = { name: name, age: age, meow:function () { return 'Meow!'; } }; return newCat; } function newUser(name, email, password) { //create a new object with properties matching the arguments passed in. 
//return the new object var newObj = { name: name, email: email, password: password }; return newObj; } var languages = { english: 'Welcome', czech: 'Vitejte', danish: 'Velkomst', dutch: 'Welkom', estonian: 'Tere tulemast', finnish: 'Tervetuloa', flemish: 'Welgekomen', french: 'Bienvenue', german: 'Willkommen', irish: 'Failte', italian: 'Benvenuto', latvian: 'Gaidits', lithuanian: 'Laukiamas', polish: 'Witamy', spanish: 'Bienvenido', swedish: 'Valkommen', welsh: 'Croeso' } function greet(language) { for(var key in languages){ if(key === language){ return languages[key]; } } return "Welcome"; } /*****************FUNCTIONS INSIDE FUNCTIONS**********************/ function addCalculateDiscountPriceMethod(storeItem) { //add a method to the storeItem object called 'calculateDiscountPrice' //this method should multiply the storeItem's 'price' and 'discountPercentage' to get the discount //the method then subtracts the discount from the price and returns the discounted price //example: //price -> 20 //discountPercentage -> .2 //discountPrice = 20 - (20 * .2) storeItem.calculateDiscountPrice = function () { var discount = storeItem.price * storeItem.discountPercentage; storeItem.discountPrice = storeItem.price - discount; return storeItem.discountPrice; }; return storeItem; } /*****************FUNCTIONS PIZZA ORDER**********************/ // pizza order function var orderCount=0; function takeOrder(topping, crustType) { console.log('Order: ' + crustType + ' pizza topped with ' + topping); orderCount=orderCount + 1; } function getSubTotal(itemCount) { return itemCount*8.97; } function getTax() { return getSubTotal(orderCount) * 0.20; } function getTotal() { return getSubTotal(orderCount) + getTax(); } takeOrder('bacon', 'thin crust'); takeOrder('pepperoni', 'hand-tossed'); takeOrder('sausage', 'deep dish'); takeOrder('chicken', 'thin crust'); takeOrder('meatballs', 'hand-tossed'); takeOrder('anchovies', 'thin crust'); console.log('Sub-Total: £' + getSubTotal(orderCount).toFixed(2)); console.log('Tax: £' + getTax().toFixed(2)); console.log('Total: £' + getTotal().toFixed(2)); /* -----------------CALCULATE BILL ------------------------- The function takes a bill, a VAT percent and a tip percent and returns the total bill, formatted as a currency string. ---------------------------------------------------------- */ function calculateBill (bill, VATrate, TipRate) { var tip = (TipRate / 100) * bill; var vat = (VATrate / 100) * bill; return '£' + (bill + tip + vat).toFixed(2); } var result = calculateBill(88, 18, 15); console.log('The total bill in this example is: ' + result);
{ return false; }
conditional_block
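The two isPrime variants above trial-divide all the way up to num - 1, while primeChecker stops at the square root: any composite number must have a factor no larger than its square root, so the extra iterations can never find anything new. A Go sketch of the square-root-bounded check, assuming only the standard library:

package main

import (
	"fmt"
	"math"
)

// isPrime trial-divides only up to sqrt(num); 0 and 1 are rejected up front.
func isPrime(num int) bool {
	if num < 2 {
		return false
	}
	limit := int(math.Sqrt(float64(num)))
	for i := 2; i <= limit; i++ {
		if num%i == 0 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isPrime(15), isPrime(17)) // false true
}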
header.go
package rpm import ( "bufio" "bytes" "context" "encoding/binary" "errors" "fmt" "io" "unsafe" ) // See the reference material at // https://rpm-software-management.github.io/rpm/manual/. // Header is a parsed RPM header. type Header struct { tags *io.SectionReader data *io.SectionReader Infos []EntryInfo region Tag } /* The header blob is _almost_ what's described in sec. 2.4 of the File Format doc, with some caveats: - There's no magic header, version, and reserved block. It starts at the "INDEXCOUNT" entry. */ // These are some sizes that are useful when examining the header disk format. const ( entryInfoSize = 16 // sizeof(uint32)*4 preambleSize = 8 // sizeof(uint32)*2 ) // ParseHeader is equivalent to // // var h Header // err := h.Parse(ctx, r) // return &h, err func ParseHeader(ctx context.Context, r io.ReaderAt) (*Header, error) { var h Header if err := h.Parse(ctx, r); err != nil { return nil, err } return &h, nil } // Parse takes a ReaderAt containing an RPM header and loads the // entries. // // The ReaderAt must stay available throughout the lifetime of the Header. func (h *Header) Parse(ctx context.Context, r io.ReaderAt) error { if err := h.loadArenas(ctx, r); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } var isBDB bool switch err := h.verifyRegion(ctx); { case errors.Is(err, nil): case errors.Is(err, errNoRegion): isBDB = true default: return fmt.Errorf("rpm: failed to parse header: %w", err) } if err := h.verifyInfo(ctx, isBDB); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } return nil } // ReadData returns a copy of the data indicated by the passed EntryInfo. // // If an error is not reported, the returned interface{} is the type indicated by the // EntryInfo's "Type" member. // // NB The TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64, and TypeI18nString // all return slices. func (h *Header) ReadData(ctx context.Context, e *EntryInfo) (interface{}, error) { // TODO(hank) Provide a generic function like `func[T any](*Header, *EntryInfo) T` to do this. switch e.Type { case TypeBin: if /* is region */ false { return nil, errors.New("todo: handle region tags") } b := make([]byte, e.count) if _, err := h.data.ReadAt(b, int64(e.offset)); err != nil { return nil, fmt.Errorf("rpm: header: error reading binary: %w", err) } return b, nil case TypeI18nString, TypeStringArray: sc := bufio.NewScanner(io.NewSectionReader(h.data, int64(e.offset), -1)) sc.Split(splitCString) s := make([]string, int(e.count)) for i, lim := 0, int(e.count); i < lim && sc.Scan(); i++ { s[i] = sc.Text() } if err := sc.Err(); err != nil { return nil, fmt.Errorf("rpm: header: error reading string array: %w", err) } return s, nil case TypeString: // C-terminated string. r := bufio.NewReader(io.NewSectionReader(h.data, int64(e.offset), -1)) s, err := r.ReadString(0x00) if err != nil { return nil, fmt.Errorf("rpm: header: error reading string: %w", err) } // ReadString includes the delimiter, be sure to remove it. 
return s[:len(s)-1], nil case TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64: sr := io.NewSectionReader(h.data, int64(e.offset), -1) switch e.Type { case TypeInt64: r := make([]uint64, int(e.count)) b := make([]byte, 8) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = binary.BigEndian.Uint64(b) } return r, nil case TypeInt32: r := make([]int32, int(e.count)) b := make([]byte, 4) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int32(binary.BigEndian.Uint32(b)) } return r, nil case TypeInt16: r := make([]int16, int(e.count)) b := make([]byte, 2) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int16(binary.BigEndian.Uint16(b)) } return r, nil case TypeInt8: b := make([]byte, int(e.count)) if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading int8: %w", err) } // Despite byte == uint8 and uint8 being convertible to int8, this is // the only way I can figure out to avoid an extra copy or using a // ByteReader, which would just have an internal buffer and be slower. r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b)) return r, nil case TypeChar: // Char and Bin are different because they're offset differently. r := make([]byte, int(e.count)) if _, err := sr.ReadAt(r, 0); err != nil { return nil, fmt.Errorf("rpm: header: error reading char: %w", err) } return r, nil } panic("unreachable") default: } return nil, fmt.Errorf("unknown type: %v", e.Type) } // SplitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1). func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\x00'); i >= 0 { return i + 1, data[0:i], nil } if atEOF { return len(data), data, nil } return 0, nil, nil } func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error { const ( headerSz = 8 tagsMax = 0x0000ffff dataMax = 0x0fffffff sizeMax = 256 * 1024 * 1024 ) b := make([]byte, headerSz) if _, err := r.ReadAt(b, 0); err != nil { return fmt.Errorf("header: failed to read: %w", err) } tagsCt := binary.BigEndian.Uint32(b[0:]) dataSz := binary.BigEndian.Uint32(b[4:]) if tagsCt > tagsMax { return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt) } if dataSz > dataMax { return fmt.Errorf("header botch: data length (%d) out of range", dataSz) } tagsSz := int64(tagsCt) * entryInfoSize // Sanity check, if possible: var inSz int64 switch v := r.(type) { case interface{ Size() int64 }: // Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these. inSz = v.Size() case io.Seeker: // Seek if present. var err error inSz, err = v.Seek(0, io.SeekEnd) if err != nil { return err } default: // Do a read for the end of the segment. 
end := preambleSize + tagsSz + int64(dataSz) if _, err := r.ReadAt(b, end-int64(len(b))); err != nil { return err } inSz = end } if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz { return fmt.Errorf("not enough data") } if tagsCt == 0 { return fmt.Errorf("no tags") } h.tags = io.NewSectionReader(r, headerSz, tagsSz) h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz)) h.Infos = make([]EntryInfo, tagsCt) return nil } // ErrNoRegion is a signal back from verifyRegion that the first tag is not one // of the expected ones. // // This being reported means that the region verification has been // short-circuited. var errNoRegion = errors.New("no initial region tag, this is probably a bdb database") func (h *Header) verifyRegion(ctx context.Context) error { const regionTagCount = 16 region, err := h.loadTag(ctx, 0) if err != nil { return err } switch region.Tag { case TagHeaderSignatures: case TagHeaderImmutable: case TagHeaderImage: default: return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion) } if region.Type != TypeBin || region.count != regionTagCount { return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count) } if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) { return fmt.Errorf("nonsense region offset") } var trailer EntryInfo b := make([]byte, entryInfoSize) if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil { return err } if err := trailer.UnmarshalBinary(b); err != nil { return err } rDataLen := region.offset + regionTagCount trailer.offset = -trailer.offset // trailer offset is negative and special rIdxLen := trailer.offset / entryInfoSize // Fixup copied out of librpm: if region.Tag == TagHeaderSignatures && trailer.Tag == TagHeaderImage { trailer.Tag = TagHeaderSignatures } if trailer.Tag != region.Tag || trailer.Type != TypeRegionTag || trailer.count != regionTagCount { return fmt.Errorf("bad region trailer: %v", trailer) } if (trailer.offset%entryInfoSize != 0) || int64(rIdxLen) > h.tags.Size() || int64(rDataLen) > h.data.Size() { return fmt.Errorf("region %d size incorrect: ril %d il %d rdl %d dl %d", region.Tag, rIdxLen, h.tags.Size(), rDataLen, h.data.Size()) } h.region = region.Tag return nil } // VerifyInfo verifies the "info" segments in the header. // // Experimentally, bdb database aren't always sorted the expected way. The // passed boolean controls whether this method uses lax verification or not. 
func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error { lim := len(h.Infos) typecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage var prev int32 start := 1 if isBDB { start-- } for i := start; i < lim; i++ { e, err := h.loadTag(ctx, i) if err != nil { return err } switch { case prev > e.offset: return fmt.Errorf("botched entry: prev > offset (%d > %d)", prev, e.offset) case e.Tag < TagHeaderI18nTable && !isBDB: return fmt.Errorf("botched entry: bad tag %v (%[1]d < %d)", e.Tag, TagHeaderI18nTable) case e.Type < TypeMin || e.Type > TypeMax: return fmt.Errorf("botched entry: bad type %v", e.Type) case e.count == 0 || int64(e.count) > h.data.Size(): return fmt.Errorf("botched entry: bad count %d", e.count) case (e.Type.alignment()-1)&e.offset != 0: return fmt.Errorf("botched entry: weird alignment: type alignment %d, offset %d", e.Type.alignment(), e.offset) case e.offset < 0 || int64(e.offset) > h.data.Size(): return fmt.Errorf("botched entry: bad offset %d", e.offset) case typecheck && !checkTagType(e.Tag, e.Type): return fmt.Errorf("botched entry: typecheck fail: %v is not %v", e.Tag, e.Type) } } return nil } func checkTagType(key Tag, typ Kind) bool { if i, ok := tagByValue[key]; ok { t := tagTable[i].Type // Check the type. Some versions of string are typed incorrectly in a // compatible way. return t == typ || t.class() == typ.class() } // Unknown tags get a pass. return true } func (h *Header) loadTag(ctx context.Context, i int) (*EntryInfo, error) { e := &h.Infos[i] if e.Tag == Tag(0) { b := make([]byte, entryInfoSize) if _, err := h.tags.ReadAt(b, int64(i)*entryInfoSize); err != nil { return nil, fmt.Errorf("header: error reading EntryInfo: %w", err) } if err := e.UnmarshalBinary(b); err != nil { return nil, fmt.Errorf("header: martian EntryInfo: %w", err) } } return e, nil } // EntryInfo describes an entry for the given Tag. type EntryInfo struct { Tag Tag Type Kind offset int32 count uint32 } func (e *EntryInfo) String() string { return fmt.Sprintf("tag %v type %v offset %d count %d", e.Tag, e.Type, e.offset, e.count) } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (e *EntryInfo)
(b []byte) error { if len(b) < 16 { return io.ErrShortBuffer } e.Tag = Tag(int32(binary.BigEndian.Uint32(b[0:4]))) e.Type = Kind(binary.BigEndian.Uint32(b[4:8])) e.offset = int32(binary.BigEndian.Uint32(b[8:12])) e.count = binary.BigEndian.Uint32(b[12:16]) return nil }
UnmarshalBinary
identifier_name
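UnmarshalBinary above decodes a fixed 16-byte record as four big-endian uint32 fields: tag, type, offset, count. A standalone Go sketch of the same round trip, with made-up field values (1000, 4, 32, 2) purely for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Lay out four big-endian uint32s, the EntryInfo wire format.
	b := make([]byte, 16)
	binary.BigEndian.PutUint32(b[0:4], 1000) // tag
	binary.BigEndian.PutUint32(b[4:8], 4)    // type
	binary.BigEndian.PutUint32(b[8:12], 32)  // offset
	binary.BigEndian.PutUint32(b[12:16], 2)  // count

	// Decode them back, converting tag and offset to signed as the code above does.
	tag := int32(binary.BigEndian.Uint32(b[0:4]))
	kind := binary.BigEndian.Uint32(b[4:8])
	offset := int32(binary.BigEndian.Uint32(b[8:12]))
	count := binary.BigEndian.Uint32(b[12:16])
	fmt.Println(tag, kind, offset, count) // 1000 4 32 2
}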
header.go
package rpm import ( "bufio" "bytes" "context" "encoding/binary" "errors" "fmt" "io" "unsafe" ) // See the reference material at // https://rpm-software-management.github.io/rpm/manual/. // Header is a parsed RPM header. type Header struct { tags *io.SectionReader data *io.SectionReader Infos []EntryInfo region Tag } /* The header blob is _almost_ what's described in sec. 2.4 of the File Format doc, with some caveats: - There's no magic header, version, and reserved block. It starts at the "INDEXCOUNT" entry. */ // These are some sizes that are useful when examining the header disk format. const ( entryInfoSize = 16 // sizeof(uint32)*4 preambleSize = 8 // sizeof(uint32)*2 ) // ParseHeader is equivalent to // // var h Header // err := h.Parse(ctx, r) // return &h, err func ParseHeader(ctx context.Context, r io.ReaderAt) (*Header, error) { var h Header if err := h.Parse(ctx, r); err != nil { return nil, err } return &h, nil } // Parse takes a ReaderAt containing an RPM header and loads the // entries. // // The ReaderAt must stay available throughout the lifetime of the Header. func (h *Header) Parse(ctx context.Context, r io.ReaderAt) error { if err := h.loadArenas(ctx, r); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } var isBDB bool switch err := h.verifyRegion(ctx); { case errors.Is(err, nil): case errors.Is(err, errNoRegion): isBDB = true default: return fmt.Errorf("rpm: failed to parse header: %w", err) } if err := h.verifyInfo(ctx, isBDB); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } return nil } // ReadData returns a copy of the data indicated by the passed EntryInfo. // // If an error is not reported, the returned interface{} is the type indicated by the // EntryInfo's "Type" member. // // NB The TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64, and TypeI18nString // all return slices. func (h *Header) ReadData(ctx context.Context, e *EntryInfo) (interface{}, error) { // TODO(hank) Provide a generic function like `func[T any](*Header, *EntryInfo) T` to do this. switch e.Type { case TypeBin: if /* is region */ false { return nil, errors.New("todo: handle region tags") } b := make([]byte, e.count) if _, err := h.data.ReadAt(b, int64(e.offset)); err != nil { return nil, fmt.Errorf("rpm: header: error reading binary: %w", err) } return b, nil case TypeI18nString, TypeStringArray: sc := bufio.NewScanner(io.NewSectionReader(h.data, int64(e.offset), -1)) sc.Split(splitCString) s := make([]string, int(e.count)) for i, lim := 0, int(e.count); i < lim && sc.Scan(); i++ { s[i] = sc.Text() } if err := sc.Err(); err != nil { return nil, fmt.Errorf("rpm: header: error reading string array: %w", err) } return s, nil case TypeString: // C-terminated string. r := bufio.NewReader(io.NewSectionReader(h.data, int64(e.offset), -1)) s, err := r.ReadString(0x00) if err != nil { return nil, fmt.Errorf("rpm: header: error reading string: %w", err) } // ReadString includes the delimiter, be sure to remove it. 
return s[:len(s)-1], nil case TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64: sr := io.NewSectionReader(h.data, int64(e.offset), -1) switch e.Type { case TypeInt64: r := make([]uint64, int(e.count)) b := make([]byte, 8) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = binary.BigEndian.Uint64(b) } return r, nil case TypeInt32: r := make([]int32, int(e.count)) b := make([]byte, 4) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int32(binary.BigEndian.Uint32(b)) } return r, nil case TypeInt16: r := make([]int16, int(e.count)) b := make([]byte, 2) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int16(binary.BigEndian.Uint16(b)) } return r, nil case TypeInt8: b := make([]byte, int(e.count)) if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading int8: %w", err) } // Despite byte == uint8 and uint8 being convertible to int8, this is // the only way I can figure out to avoid an extra copy or using a // ByteReader, which would just have an internal buffer and be slower. r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b)) return r, nil case TypeChar: // Char and Bin are different because they're offset differently. r := make([]byte, int(e.count)) if _, err := sr.ReadAt(r, 0); err != nil { return nil, fmt.Errorf("rpm: header: error reading char: %w", err) } return r, nil } panic("unreachable") default: } return nil, fmt.Errorf("unknown type: %v", e.Type) } // SplitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1). func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\x00'); i >= 0 { return i + 1, data[0:i], nil } if atEOF { return len(data), data, nil } return 0, nil, nil } func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error { const ( headerSz = 8 tagsMax = 0x0000ffff dataMax = 0x0fffffff sizeMax = 256 * 1024 * 1024 ) b := make([]byte, headerSz) if _, err := r.ReadAt(b, 0); err != nil { return fmt.Errorf("header: failed to read: %w", err) } tagsCt := binary.BigEndian.Uint32(b[0:]) dataSz := binary.BigEndian.Uint32(b[4:]) if tagsCt > tagsMax { return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt) } if dataSz > dataMax { return fmt.Errorf("header botch: data length (%d) out of range", dataSz) } tagsSz := int64(tagsCt) * entryInfoSize // Sanity check, if possible: var inSz int64 switch v := r.(type) { case interface{ Size() int64 }: // Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these. inSz = v.Size() case io.Seeker: // Seek if present. var err error inSz, err = v.Seek(0, io.SeekEnd) if err != nil { return err } default: // Do a read for the end of the segment. 
end := preambleSize + tagsSz + int64(dataSz) if _, err := r.ReadAt(b, end-int64(len(b))); err != nil { return err } inSz = end } if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz { return fmt.Errorf("not enough data") } if tagsCt == 0 { return fmt.Errorf("no tags") } h.tags = io.NewSectionReader(r, headerSz, tagsSz) h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz)) h.Infos = make([]EntryInfo, tagsCt) return nil } // ErrNoRegion is a signal back from verifyRegion that the first tag is not one // of the expected ones. // // This being reported means that the region verification has been // short-circuited. var errNoRegion = errors.New("no initial region tag, this is probably a bdb database") func (h *Header) verifyRegion(ctx context.Context) error { const regionTagCount = 16 region, err := h.loadTag(ctx, 0) if err != nil { return err } switch region.Tag { case TagHeaderSignatures: case TagHeaderImmutable: case TagHeaderImage: default: return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion) } if region.Type != TypeBin || region.count != regionTagCount { return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count) } if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) { return fmt.Errorf("nonsense region offset") } var trailer EntryInfo b := make([]byte, entryInfoSize) if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil { return err } if err := trailer.UnmarshalBinary(b); err != nil { return err } rDataLen := region.offset + regionTagCount trailer.offset = -trailer.offset // trailer offset is negative and special rIdxLen := trailer.offset / entryInfoSize // Fixup copied out of librpm: if region.Tag == TagHeaderSignatures && trailer.Tag == TagHeaderImage { trailer.Tag = TagHeaderSignatures } if trailer.Tag != region.Tag || trailer.Type != TypeRegionTag || trailer.count != regionTagCount { return fmt.Errorf("bad region trailer: %v", trailer) } if (trailer.offset%entryInfoSize != 0) || int64(rIdxLen) > h.tags.Size() || int64(rDataLen) > h.data.Size() { return fmt.Errorf("region %d size incorrect: ril %d il %d rdl %d dl %d", region.Tag, rIdxLen, h.tags.Size(), rDataLen, h.data.Size()) } h.region = region.Tag return nil } // VerifyInfo verifies the "info" segments in the header. // // Experimentally, bdb database aren't always sorted the expected way. The // passed boolean controls whether this method uses lax verification or not. 
func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error { lim := len(h.Infos) typecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage var prev int32 start := 1 if isBDB { start-- } for i := start; i < lim; i++ { e, err := h.loadTag(ctx, i) if err != nil { return err } switch { case prev > e.offset: return fmt.Errorf("botched entry: prev > offset (%d > %d)", prev, e.offset) case e.Tag < TagHeaderI18nTable && !isBDB: return fmt.Errorf("botched entry: bad tag %v (%[1]d < %d)", e.Tag, TagHeaderI18nTable) case e.Type < TypeMin || e.Type > TypeMax: return fmt.Errorf("botched entry: bad type %v", e.Type) case e.count == 0 || int64(e.count) > h.data.Size(): return fmt.Errorf("botched entry: bad count %d", e.count) case (e.Type.alignment()-1)&e.offset != 0: return fmt.Errorf("botched entry: weird alignment: type alignment %d, offset %d", e.Type.alignment(), e.offset) case e.offset < 0 || int64(e.offset) > h.data.Size(): return fmt.Errorf("botched entry: bad offset %d", e.offset) case typecheck && !checkTagType(e.Tag, e.Type): return fmt.Errorf("botched entry: typecheck fail: %v is not %v", e.Tag, e.Type) } } return nil } func checkTagType(key Tag, typ Kind) bool
func (h *Header) loadTag(ctx context.Context, i int) (*EntryInfo, error) { e := &h.Infos[i] if e.Tag == Tag(0) { b := make([]byte, entryInfoSize) if _, err := h.tags.ReadAt(b, int64(i)*entryInfoSize); err != nil { return nil, fmt.Errorf("header: error reading EntryInfo: %w", err) } if err := e.UnmarshalBinary(b); err != nil { return nil, fmt.Errorf("header: martian EntryInfo: %w", err) } } return e, nil } // EntryInfo describes an entry for the given Tag. type EntryInfo struct { Tag Tag Type Kind offset int32 count uint32 } func (e *EntryInfo) String() string { return fmt.Sprintf("tag %v type %v offset %d count %d", e.Tag, e.Type, e.offset, e.count) } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (e *EntryInfo) UnmarshalBinary(b []byte) error { if len(b) < 16 { return io.ErrShortBuffer } e.Tag = Tag(int32(binary.BigEndian.Uint32(b[0:4]))) e.Type = Kind(binary.BigEndian.Uint32(b[4:8])) e.offset = int32(binary.BigEndian.Uint32(b[8:12])) e.count = binary.BigEndian.Uint32(b[12:16]) return nil }
{ if i, ok := tagByValue[key]; ok { t := tagTable[i].Type // Check the type. Some versions of string are typed incorrectly in a // compatible way. return t == typ || t.class() == typ.class() } // Unknown tags get a pass. return true }
identifier_body
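splitCString above is a bufio.SplitFunc: given the scanner's buffered bytes, it reports how far to advance and which token to emit, treating NUL as the delimiter, much like strings(1). A self-contained Go sketch of the same idea; splitAtNUL is an illustrative re-implementation, not the package's own function:

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// splitAtNUL emits one token per NUL-terminated run of bytes.
func splitAtNUL(data []byte, atEOF bool) (int, []byte, error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, 0x00); i >= 0 {
		return i + 1, data[:i], nil // consume the NUL, emit the bytes before it
	}
	if atEOF {
		return len(data), data, nil // trailing bytes with no NUL
	}
	return 0, nil, nil // need more data
}

func main() {
	sc := bufio.NewScanner(bytes.NewReader([]byte("en\x00de\x00fr\x00")))
	sc.Split(splitAtNUL)
	for sc.Scan() {
		fmt.Println(sc.Text()) // en, de, fr
	}
}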
header.go
package rpm import ( "bufio" "bytes" "context" "encoding/binary" "errors" "fmt" "io" "unsafe" ) // See the reference material at // https://rpm-software-management.github.io/rpm/manual/. // Header is a parsed RPM header. type Header struct { tags *io.SectionReader data *io.SectionReader Infos []EntryInfo region Tag } /* The header blob is _almost_ what's described in sec. 2.4 of the File Format doc, with some caveats: - There's no magic header, version, and reserved block. It starts at the "INDEXCOUNT" entry. */ // These are some sizes that are useful when examining the header disk format. const ( entryInfoSize = 16 // sizeof(uint32)*4 preambleSize = 8 // sizeof(uint32)*2 ) // ParseHeader is equivalent to // // var h Header // err := h.Parse(ctx, r) // return &h, err func ParseHeader(ctx context.Context, r io.ReaderAt) (*Header, error) { var h Header if err := h.Parse(ctx, r); err != nil { return nil, err } return &h, nil } // Parse takes a ReaderAt containing an RPM header and loads the // entries. // // The ReaderAt must stay available throughout the lifetime of the Header. func (h *Header) Parse(ctx context.Context, r io.ReaderAt) error { if err := h.loadArenas(ctx, r); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } var isBDB bool switch err := h.verifyRegion(ctx); { case errors.Is(err, nil): case errors.Is(err, errNoRegion): isBDB = true default: return fmt.Errorf("rpm: failed to parse header: %w", err) } if err := h.verifyInfo(ctx, isBDB); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } return nil } // ReadData returns a copy of the data indicated by the passed EntryInfo. // // If an error is not reported, the returned interface{} is the type indicated by the // EntryInfo's "Type" member. // // NB The TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64, and TypeI18nString // all return slices. func (h *Header) ReadData(ctx context.Context, e *EntryInfo) (interface{}, error) { // TODO(hank) Provide a generic function like `func[T any](*Header, *EntryInfo) T` to do this. switch e.Type { case TypeBin: if /* is region */ false { return nil, errors.New("todo: handle region tags") } b := make([]byte, e.count) if _, err := h.data.ReadAt(b, int64(e.offset)); err != nil { return nil, fmt.Errorf("rpm: header: error reading binary: %w", err) } return b, nil case TypeI18nString, TypeStringArray: sc := bufio.NewScanner(io.NewSectionReader(h.data, int64(e.offset), -1)) sc.Split(splitCString) s := make([]string, int(e.count)) for i, lim := 0, int(e.count); i < lim && sc.Scan(); i++ { s[i] = sc.Text() } if err := sc.Err(); err != nil { return nil, fmt.Errorf("rpm: header: error reading string array: %w", err) } return s, nil case TypeString: // C-terminated string. r := bufio.NewReader(io.NewSectionReader(h.data, int64(e.offset), -1)) s, err := r.ReadString(0x00) if err != nil { return nil, fmt.Errorf("rpm: header: error reading string: %w", err) } // ReadString includes the delimiter, be sure to remove it. 
return s[:len(s)-1], nil case TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64: sr := io.NewSectionReader(h.data, int64(e.offset), -1) switch e.Type { case TypeInt64: r := make([]uint64, int(e.count)) b := make([]byte, 8) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = binary.BigEndian.Uint64(b) } return r, nil case TypeInt32: r := make([]int32, int(e.count)) b := make([]byte, 4) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int32(binary.BigEndian.Uint32(b)) } return r, nil case TypeInt16: r := make([]int16, int(e.count)) b := make([]byte, 2) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int16(binary.BigEndian.Uint16(b)) } return r, nil case TypeInt8: b := make([]byte, int(e.count)) if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading int8: %w", err) } // Despite byte == uint8 and uint8 being convertible to int8, this is // the only way I can figure out to avoid an extra copy or using a // ByteReader, which would just have an internal buffer and be slower. r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b)) return r, nil case TypeChar: // Char and Bin are different because they're offset differently. r := make([]byte, int(e.count)) if _, err := sr.ReadAt(r, 0); err != nil { return nil, fmt.Errorf("rpm: header: error reading char: %w", err) } return r, nil } panic("unreachable") default: } return nil, fmt.Errorf("unknown type: %v", e.Type) } // SplitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1). func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\x00'); i >= 0 { return i + 1, data[0:i], nil } if atEOF { return len(data), data, nil } return 0, nil, nil } func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error { const ( headerSz = 8 tagsMax = 0x0000ffff dataMax = 0x0fffffff sizeMax = 256 * 1024 * 1024 ) b := make([]byte, headerSz) if _, err := r.ReadAt(b, 0); err != nil { return fmt.Errorf("header: failed to read: %w", err) } tagsCt := binary.BigEndian.Uint32(b[0:]) dataSz := binary.BigEndian.Uint32(b[4:]) if tagsCt > tagsMax
if dataSz > dataMax { return fmt.Errorf("header botch: data length (%d) out of range", dataSz) } tagsSz := int64(tagsCt) * entryInfoSize // Sanity check, if possible: var inSz int64 switch v := r.(type) { case interface{ Size() int64 }: // Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these. inSz = v.Size() case io.Seeker: // Seek if present. var err error inSz, err = v.Seek(0, io.SeekEnd) if err != nil { return err } default: // Do a read for the end of the segment. end := preambleSize + tagsSz + int64(dataSz) if _, err := r.ReadAt(b, end-int64(len(b))); err != nil { return err } inSz = end } if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz { return fmt.Errorf("not enough data") } if tagsCt == 0 { return fmt.Errorf("no tags") } h.tags = io.NewSectionReader(r, headerSz, tagsSz) h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz)) h.Infos = make([]EntryInfo, tagsCt) return nil } // ErrNoRegion is a signal back from verifyRegion that the first tag is not one // of the expected ones. // // This being reported means that the region verification has been // short-circuited. var errNoRegion = errors.New("no initial region tag, this is probably a bdb database") func (h *Header) verifyRegion(ctx context.Context) error { const regionTagCount = 16 region, err := h.loadTag(ctx, 0) if err != nil { return err } switch region.Tag { case TagHeaderSignatures: case TagHeaderImmutable: case TagHeaderImage: default: return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion) } if region.Type != TypeBin || region.count != regionTagCount { return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count) } if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) { return fmt.Errorf("nonsense region offset") } var trailer EntryInfo b := make([]byte, entryInfoSize) if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil { return err } if err := trailer.UnmarshalBinary(b); err != nil { return err } rDataLen := region.offset + regionTagCount trailer.offset = -trailer.offset // trailer offset is negative and special rIdxLen := trailer.offset / entryInfoSize // Fixup copied out of librpm: if region.Tag == TagHeaderSignatures && trailer.Tag == TagHeaderImage { trailer.Tag = TagHeaderSignatures } if trailer.Tag != region.Tag || trailer.Type != TypeRegionTag || trailer.count != regionTagCount { return fmt.Errorf("bad region trailer: %v", trailer) } if (trailer.offset%entryInfoSize != 0) || int64(rIdxLen) > h.tags.Size() || int64(rDataLen) > h.data.Size() { return fmt.Errorf("region %d size incorrect: ril %d il %d rdl %d dl %d", region.Tag, rIdxLen, h.tags.Size(), rDataLen, h.data.Size()) } h.region = region.Tag return nil } // VerifyInfo verifies the "info" segments in the header. // // Experimentally, bdb database aren't always sorted the expected way. The // passed boolean controls whether this method uses lax verification or not. 
func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error { lim := len(h.Infos) typecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage var prev int32 start := 1 if isBDB { start-- } for i := start; i < lim; i++ { e, err := h.loadTag(ctx, i) if err != nil { return err } switch { case prev > e.offset: return fmt.Errorf("botched entry: prev > offset (%d > %d)", prev, e.offset) case e.Tag < TagHeaderI18nTable && !isBDB: return fmt.Errorf("botched entry: bad tag %v (%[1]d < %d)", e.Tag, TagHeaderI18nTable) case e.Type < TypeMin || e.Type > TypeMax: return fmt.Errorf("botched entry: bad type %v", e.Type) case e.count == 0 || int64(e.count) > h.data.Size(): return fmt.Errorf("botched entry: bad count %d", e.count) case (e.Type.alignment()-1)&e.offset != 0: return fmt.Errorf("botched entry: weird alignment: type alignment %d, offset %d", e.Type.alignment(), e.offset) case e.offset < 0 || int64(e.offset) > h.data.Size(): return fmt.Errorf("botched entry: bad offset %d", e.offset) case typecheck && !checkTagType(e.Tag, e.Type): return fmt.Errorf("botched entry: typecheck fail: %v is not %v", e.Tag, e.Type) } } return nil } func checkTagType(key Tag, typ Kind) bool { if i, ok := tagByValue[key]; ok { t := tagTable[i].Type // Check the type. Some versions of string are typed incorrectly in a // compatible way. return t == typ || t.class() == typ.class() } // Unknown tags get a pass. return true } func (h *Header) loadTag(ctx context.Context, i int) (*EntryInfo, error) { e := &h.Infos[i] if e.Tag == Tag(0) { b := make([]byte, entryInfoSize) if _, err := h.tags.ReadAt(b, int64(i)*entryInfoSize); err != nil { return nil, fmt.Errorf("header: error reading EntryInfo: %w", err) } if err := e.UnmarshalBinary(b); err != nil { return nil, fmt.Errorf("header: martian EntryInfo: %w", err) } } return e, nil } // EntryInfo describes an entry for the given Tag. type EntryInfo struct { Tag Tag Type Kind offset int32 count uint32 } func (e *EntryInfo) String() string { return fmt.Sprintf("tag %v type %v offset %d count %d", e.Tag, e.Type, e.offset, e.count) } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (e *EntryInfo) UnmarshalBinary(b []byte) error { if len(b) < 16 { return io.ErrShortBuffer } e.Tag = Tag(int32(binary.BigEndian.Uint32(b[0:4]))) e.Type = Kind(binary.BigEndian.Uint32(b[4:8])) e.offset = int32(binary.BigEndian.Uint32(b[8:12])) e.count = binary.BigEndian.Uint32(b[12:16]) return nil }
{ return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt) }
conditional_block
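loadArenas above reads an 8-byte preamble (tag count, then data size, both big-endian) and range-checks each value before trusting it, so a corrupt header fails fast instead of driving a huge allocation. A Go sketch of that validation step, reusing the tagsMax and dataMax limits from the code above:

package main

import (
	"encoding/binary"
	"fmt"
)

// checkPreamble decodes the two preamble fields and applies the same kind of
// range checks loadArenas performs before sizing its arenas.
func checkPreamble(b [8]byte) error {
	const (
		tagsMax = 0x0000ffff
		dataMax = 0x0fffffff
	)
	tagsCt := binary.BigEndian.Uint32(b[0:4])
	dataSz := binary.BigEndian.Uint32(b[4:8])
	if tagsCt == 0 || tagsCt > tagsMax {
		return fmt.Errorf("tag count %d out of range", tagsCt)
	}
	if dataSz > dataMax {
		return fmt.Errorf("data size %d out of range", dataSz)
	}
	return nil
}

func main() {
	var b [8]byte
	binary.BigEndian.PutUint32(b[0:4], 3)  // 3 tags
	binary.BigEndian.PutUint32(b[4:8], 64) // 64 bytes of data
	fmt.Println(checkPreamble(b)) // <nil>
}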
header.go
package rpm import ( "bufio" "bytes" "context" "encoding/binary" "errors" "fmt" "io" "unsafe" ) // See the reference material at // https://rpm-software-management.github.io/rpm/manual/. // Header is a parsed RPM header. type Header struct { tags *io.SectionReader data *io.SectionReader Infos []EntryInfo region Tag } /* The header blob is _almost_ what's described in sec. 2.4 of the File Format doc, with some caveats: - There's no magic header, version, and reserved block. It starts at the "INDEXCOUNT" entry. */ // These are some sizes that are useful when examining the header disk format. const ( entryInfoSize = 16 // sizeof(uint32)*4 preambleSize = 8 // sizeof(uint32)*2 ) // ParseHeader is equivalent to // // var h Header // err := h.Parse(ctx, r) // return &h, err func ParseHeader(ctx context.Context, r io.ReaderAt) (*Header, error) { var h Header if err := h.Parse(ctx, r); err != nil { return nil, err } return &h, nil } // Parse takes a ReaderAt containing an RPM header and loads the // entries. // // The ReaderAt must stay available throughout the lifetime of the Header. func (h *Header) Parse(ctx context.Context, r io.ReaderAt) error { if err := h.loadArenas(ctx, r); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } var isBDB bool switch err := h.verifyRegion(ctx); { case errors.Is(err, nil): case errors.Is(err, errNoRegion): isBDB = true default: return fmt.Errorf("rpm: failed to parse header: %w", err) } if err := h.verifyInfo(ctx, isBDB); err != nil { return fmt.Errorf("rpm: failed to parse header: %w", err) } return nil } // ReadData returns a copy of the data indicated by the passed EntryInfo. // // If an error is not reported, the returned interface{} is the type indicated by the // EntryInfo's "Type" member. // // NB The TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64, and TypeI18nString // all return slices. func (h *Header) ReadData(ctx context.Context, e *EntryInfo) (interface{}, error) { // TODO(hank) Provide a generic function like `func[T any](*Header, *EntryInfo) T` to do this. switch e.Type { case TypeBin: if /* is region */ false { return nil, errors.New("todo: handle region tags") } b := make([]byte, e.count) if _, err := h.data.ReadAt(b, int64(e.offset)); err != nil { return nil, fmt.Errorf("rpm: header: error reading binary: %w", err) } return b, nil case TypeI18nString, TypeStringArray: sc := bufio.NewScanner(io.NewSectionReader(h.data, int64(e.offset), -1)) sc.Split(splitCString) s := make([]string, int(e.count)) for i, lim := 0, int(e.count); i < lim && sc.Scan(); i++ { s[i] = sc.Text() } if err := sc.Err(); err != nil { return nil, fmt.Errorf("rpm: header: error reading string array: %w", err) } return s, nil case TypeString: // C-terminated string. r := bufio.NewReader(io.NewSectionReader(h.data, int64(e.offset), -1)) s, err := r.ReadString(0x00) if err != nil { return nil, fmt.Errorf("rpm: header: error reading string: %w", err) } // ReadString includes the delimiter, be sure to remove it. 
return s[:len(s)-1], nil case TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64: sr := io.NewSectionReader(h.data, int64(e.offset), -1) switch e.Type { case TypeInt64: r := make([]uint64, int(e.count)) b := make([]byte, 8) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = binary.BigEndian.Uint64(b) } return r, nil case TypeInt32: r := make([]int32, int(e.count)) b := make([]byte, 4) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int32(binary.BigEndian.Uint32(b)) } return r, nil case TypeInt16: r := make([]int16, int(e.count)) b := make([]byte, 2) for i := range r { if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err) } r[i] = int16(binary.BigEndian.Uint16(b)) } return r, nil case TypeInt8: b := make([]byte, int(e.count)) if _, err := io.ReadFull(sr, b); err != nil { return nil, fmt.Errorf("rpm: header: error reading int8: %w", err) } // Despite byte == uint8 and uint8 being convertible to int8, this is // the only way I can figure out to avoid an extra copy or using a // ByteReader, which would just have an internal buffer and be slower. r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b)) return r, nil case TypeChar: // Char and Bin are different because they're offset differently. r := make([]byte, int(e.count)) if _, err := sr.ReadAt(r, 0); err != nil { return nil, fmt.Errorf("rpm: header: error reading char: %w", err) } return r, nil } panic("unreachable") default: } return nil, fmt.Errorf("unknown type: %v", e.Type) } // SplitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1). func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\x00'); i >= 0 { return i + 1, data[0:i], nil } if atEOF { return len(data), data, nil } return 0, nil, nil } func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error { const ( headerSz = 8 tagsMax = 0x0000ffff dataMax = 0x0fffffff sizeMax = 256 * 1024 * 1024 ) b := make([]byte, headerSz) if _, err := r.ReadAt(b, 0); err != nil { return fmt.Errorf("header: failed to read: %w", err) } tagsCt := binary.BigEndian.Uint32(b[0:]) dataSz := binary.BigEndian.Uint32(b[4:]) if tagsCt > tagsMax { return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt) } if dataSz > dataMax { return fmt.Errorf("header botch: data length (%d) out of range", dataSz) } tagsSz := int64(tagsCt) * entryInfoSize // Sanity check, if possible: var inSz int64 switch v := r.(type) { case interface{ Size() int64 }: // Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these. inSz = v.Size() case io.Seeker: // Seek if present. var err error inSz, err = v.Seek(0, io.SeekEnd) if err != nil { return err } default: // Do a read for the end of the segment. 
end := preambleSize + tagsSz + int64(dataSz) if _, err := r.ReadAt(b, end-int64(len(b))); err != nil { return err } inSz = end } if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz { return fmt.Errorf("not enough data") } if tagsCt == 0 { return fmt.Errorf("no tags") } h.tags = io.NewSectionReader(r, headerSz, tagsSz) h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz)) h.Infos = make([]EntryInfo, tagsCt) return nil } // ErrNoRegion is a signal back from verifyRegion that the first tag is not one // of the expected ones. // // This being reported means that the region verification has been // short-circuited. var errNoRegion = errors.New("no initial region tag, this is probably a bdb database") func (h *Header) verifyRegion(ctx context.Context) error { const regionTagCount = 16 region, err := h.loadTag(ctx, 0) if err != nil { return err } switch region.Tag { case TagHeaderSignatures: case TagHeaderImmutable: case TagHeaderImage: default: return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion) } if region.Type != TypeBin || region.count != regionTagCount { return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count) } if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) { return fmt.Errorf("nonsense region offset") } var trailer EntryInfo b := make([]byte, entryInfoSize) if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil { return err } if err := trailer.UnmarshalBinary(b); err != nil { return err } rDataLen := region.offset + regionTagCount trailer.offset = -trailer.offset // trailer offset is negative and special rIdxLen := trailer.offset / entryInfoSize // Fixup copied out of librpm: if region.Tag == TagHeaderSignatures && trailer.Tag == TagHeaderImage { trailer.Tag = TagHeaderSignatures } if trailer.Tag != region.Tag || trailer.Type != TypeRegionTag || trailer.count != regionTagCount { return fmt.Errorf("bad region trailer: %v", trailer) } if (trailer.offset%entryInfoSize != 0) || int64(rIdxLen) > h.tags.Size() || int64(rDataLen) > h.data.Size() { return fmt.Errorf("region %d size incorrect: ril %d il %d rdl %d dl %d", region.Tag, rIdxLen, h.tags.Size(), rDataLen, h.data.Size()) } h.region = region.Tag return nil } // VerifyInfo verifies the "info" segments in the header. // // Experimentally, bdb database aren't always sorted the expected way. The // passed boolean controls whether this method uses lax verification or not. func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error { lim := len(h.Infos) typecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage var prev int32 start := 1 if isBDB { start-- } for i := start; i < lim; i++ {
} switch { case prev > e.offset: return fmt.Errorf("botched entry: prev > offset (%d > %d)", prev, e.offset) case e.Tag < TagHeaderI18nTable && !isBDB: return fmt.Errorf("botched entry: bad tag %v (%[1]d < %d)", e.Tag, TagHeaderI18nTable) case e.Type < TypeMin || e.Type > TypeMax: return fmt.Errorf("botched entry: bad type %v", e.Type) case e.count == 0 || int64(e.count) > h.data.Size(): return fmt.Errorf("botched entry: bad count %d", e.count) case (e.Type.alignment()-1)&e.offset != 0: return fmt.Errorf("botched entry: weird alignment: type alignment %d, offset %d", e.Type.alignment(), e.offset) case e.offset < 0 || int64(e.offset) > h.data.Size(): return fmt.Errorf("botched entry: bad offset %d", e.offset) case typecheck && !checkTagType(e.Tag, e.Type): return fmt.Errorf("botched entry: typecheck fail: %v is not %v", e.Tag, e.Type) } } return nil } func checkTagType(key Tag, typ Kind) bool { if i, ok := tagByValue[key]; ok { t := tagTable[i].Type // Check the type. Some versions of string are typed incorrectly in a // compatible way. return t == typ || t.class() == typ.class() } // Unknown tags get a pass. return true } func (h *Header) loadTag(ctx context.Context, i int) (*EntryInfo, error) { e := &h.Infos[i] if e.Tag == Tag(0) { b := make([]byte, entryInfoSize) if _, err := h.tags.ReadAt(b, int64(i)*entryInfoSize); err != nil { return nil, fmt.Errorf("header: error reading EntryInfo: %w", err) } if err := e.UnmarshalBinary(b); err != nil { return nil, fmt.Errorf("header: martian EntryInfo: %w", err) } } return e, nil } // EntryInfo describes an entry for the given Tag. type EntryInfo struct { Tag Tag Type Kind offset int32 count uint32 } func (e *EntryInfo) String() string { return fmt.Sprintf("tag %v type %v offset %d count %d", e.Tag, e.Type, e.offset, e.count) } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (e *EntryInfo) UnmarshalBinary(b []byte) error { if len(b) < 16 { return io.ErrShortBuffer } e.Tag = Tag(int32(binary.BigEndian.Uint32(b[0:4]))) e.Type = Kind(binary.BigEndian.Uint32(b[4:8])) e.offset = int32(binary.BigEndian.Uint32(b[8:12])) e.count = binary.BigEndian.Uint32(b[12:16]) return nil }
e, err := h.loadTag(ctx, i) if err != nil { return err
random_line_split
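The example above ends with splitCString, a bufio.SplitFunc that tokenizes NUL-terminated strings the way RPM stores string arrays. Below is a minimal, self-contained sketch of driving such a split function with a bufio.Scanner; the sample blob and the local copy of the split function are illustrative, not part of the original package.

// split_demo.go: a sketch of scanning NUL-delimited strings with a custom
// bufio.SplitFunc, mirroring the splitCString shown above.
package main

import (
	"bufio"
	"bytes"
	"fmt"
)

func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, '\x00'); i >= 0 {
		// Consume the NUL, but don't include it in the token.
		return i + 1, data[0:i], nil
	}
	if atEOF {
		return len(data), data, nil
	}
	// Need more data before a NUL can be found.
	return 0, nil, nil
}

func main() {
	// RPM string arrays are stored as consecutive NUL-terminated strings.
	blob := []byte("name\x00version\x00release\x00")
	s := bufio.NewScanner(bytes.NewReader(blob))
	s.Split(splitCString)
	for s.Scan() {
		fmt.Printf("%q\n", s.Text()) // "name", "version", "release"
	}
}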
profiles.go
package tc /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import ( "database/sql" "errors" "fmt" "strconv" "strings" "time" "github.com/apache/trafficcontrol/lib/go-log" "github.com/apache/trafficcontrol/lib/go-tc/tovalidate" "github.com/apache/trafficcontrol/lib/go-util" validation "github.com/go-ozzo/ozzo-validation" "github.com/lib/pq" ) // These are the valid values for the Type property of a Profile. No other // values will be accepted, and these are not configurable. const ( CacheServerProfileType = "ATS_PROFILE" DeliveryServiceProfileType = "DS_PROFILE" ElasticSearchProfileType = "ES_PROFILE" GroveProfileType = "GROVE_PROFILE" InfluxdbProfileType = "INFLUXDB_PROFILE" KafkaProfileType = "KAFKA_PROFILE" LogstashProfileType = "LOGSTASH_PROFILE" OriginProfileType = "ORG_PROFILE" // RiakProfileType is the type of a Profile used on the legacy RiakKV system // which used to be used as a back-end for Traffic Vault. // // Deprecated: Support for Riak as a Traffic Vault back-end is being dropped // in the near future. Profiles of type UnknownProfileType should be used on // PostgreSQL database servers instead. RiakProfileType = "RIAK_PROFILE" SplunkProfileType = "SPLUNK_PROFILE" TrafficMonitorProfileType = "TM_PROFILE" TrafficPortalProfileType = "TP_PROFILE" TrafficRouterProfileType = "TR_PROFILE" TrafficStatsProfileType = "TS_PROFILE" UnkownProfileType = "UNK_PROFILE" ) // ProfilesResponse is a list of profiles returned by GET requests. type ProfilesResponse struct { Response []Profile `json:"response"` Alerts } // ProfileResponse is a single Profile Response for Update and Create to depict what changed // swagger:response ProfileResponse // in: body type ProfileResponse struct { // in: body Response Profile `json:"response"` Alerts } // A Profile represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. type Profile struct { ID int `json:"id" db:"id"` LastUpdated TimeNoMod `json:"lastUpdated"` Name string `json:"name"` Parameter string `json:"param"` Description string `json:"description"` CDNName string `json:"cdnName"` CDNID int `json:"cdn"` RoutingDisabled bool `json:"routingDisabled"` Type string `json:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfilesResponseV5 is a list of profiles returned by GET requests. type ProfilesResponseV5 struct { Response []ProfileV5 `json:"response"` Alerts } // A ProfileV5 represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. 
Note: Field LastUpdated represents RFC3339 type ProfileV5 struct { ID int `json:"id" db:"id"` LastUpdated time.Time `json:"lastUpdated" db:"last_updated"` Name string `json:"name" db:"name"` Description string `json:"description" db:"description"` CDNName string `json:"cdnName" db:"cdn_name"` CDNID int `json:"cdn" db:"cdn"` RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"` Type string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileNullable is exactly the same as Profile except that its fields are // reference values, so they may be nil. type ProfileNullable struct { ID *int `json:"id" db:"id"` LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"` Name *string `json:"name" db:"name"` Description *string `json:"description" db:"description"` CDNName *string `json:"cdnName" db:"cdn_name"` CDNID *int `json:"cdn" db:"cdn"` RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"` Type *string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileCopy contains details about the profile created from an existing profile. type ProfileCopy struct { ID int `json:"id"` Name string `json:"name"` ExistingID int `json:"idCopyFrom"` ExistingName string `json:"profileCopyFrom"` Description string `json:"description"` } // ProfileCopyResponse represents the Traffic Ops API's response when a Profile // is copied. type ProfileCopyResponse struct { Response ProfileCopy `json:"response"` Alerts } // ProfileExportImportNullable is an object of the form used by Traffic Ops // to represent exported and imported profiles. type ProfileExportImportNullable struct { Name *string `json:"name"` Description *string `json:"description"` CDNName *string `json:"cdn"` Type *string `json:"type"` } // ProfileExportResponse is an object of the form used by Traffic Ops // to represent exported profile response. type ProfileExportResponse struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` Alerts } // ProfileImportRequest is an object of the form used by Traffic Ops // to represent a request to import a profile. type ProfileImportRequest struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` } // ProfileImportResponse is an object of the form used by Traffic Ops // to represent a response from importing a profile. type ProfileImportResponse struct { Response ProfileImportResponseObj `json:"response"` Alerts } // ProfileImportResponseObj contains data about the profile being imported. type ProfileImportResponseObj struct { ProfileExportImportNullable ID *int `json:"id"` } // Validate validates an profile import request, implementing the // github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator // interface. 
func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error { profile := profileImport.Profile // Profile fields are valid errs := tovalidate.ToErrors(validation.Errors{ "name": validation.Validate(profile.Name, validation.By( func(value interface{}) error { name, ok := value.(*string) if !ok { return fmt.Errorf("wrong type, need: string, got: %T", value) } if name == nil || *name == "" { return errors.New("required and cannot be blank") } if strings.Contains(*name, " ") { return errors.New("cannot contain spaces") } return nil }, )), "description": validation.Validate(profile.Description, validation.Required), "cdnName": validation.Validate(profile.CDNName, validation.Required), "type": validation.Validate(profile.Type, validation.Required), }) // Validate CDN exist if profile.CDNName != nil { if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil { errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if !ok { errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName)) } } // Validate profile does not already exist if profile.Name != nil { if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil { errString := fmt.Sprintf("checking profile name %v existence", *profile.Name) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if ok { errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name)) } } // Validate all parameters // export/import does not include secure flag // default value to not flag on validation secure := 1 for i, pp := range profileImport.Parameters { if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, pp.Value, &secure); len(ppErrs) > 0 { for _, err := range ppErrs { errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error())) } } } if len(errs) > 0 { return util.JoinErrs(errs) } return nil } // ProfilesExistByIDs returns whether profiles exist for all the given ids, and any error. // TODO move to helper package. func ProfilesExistByIDs(ids []int64, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = ANY($1)`, pq.Array(ids)).Scan(&count); err != nil { return false, errors.New("querying profiles existence from id: " + err.Error()) } return count == len(ids), nil } // ProfileExistsByID returns whether a profile with the given id exists, and any error. // TODO move to helper package. func ProfileExistsByID(id int64, tx *sql.Tx) (bool, error)
// ProfileExistsByName returns whether a profile with the given name exists, and any error. // TODO move to helper package. func ProfileExistsByName(name string, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where name = $1`, name).Scan(&count); err != nil { return false, errors.New("querying profile existence from name: " + err.Error()) } return count > 0, nil }
{ count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = $1`, id).Scan(&count); err != nil { return false, errors.New("querying profile existence from id: " + err.Error()) } return count > 0, nil }
identifier_body
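The middle recovered above is the body of ProfileExistsByID: a count(*) probe run inside a transaction. A hedged sketch of the same check follows; the EXISTS form is an assumed alternative (not what the original uses) that lets PostgreSQL stop at the first matching row, and the package and function names are illustrative.

// Package existscheck is a hypothetical home for this sketch.
package existscheck

import (
	"database/sql"
	"fmt"
)

// profileExists is a hypothetical variant of ProfileExistsByID that asks the
// database for a boolean directly instead of scanning a row count.
func profileExists(id int64, tx *sql.Tx) (bool, error) {
	var exists bool
	// EXISTS short-circuits on the first match; scanning into bool relies on
	// the driver (e.g. lib/pq) mapping Postgres booleans to Go bools.
	err := tx.QueryRow(`SELECT EXISTS(SELECT 1 FROM profile WHERE id = $1)`, id).Scan(&exists)
	if err != nil {
		return false, fmt.Errorf("querying profile existence from id: %w", err)
	}
	return exists, nil
}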
profiles.go
package tc /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import ( "database/sql" "errors" "fmt" "strconv" "strings" "time" "github.com/apache/trafficcontrol/lib/go-log" "github.com/apache/trafficcontrol/lib/go-tc/tovalidate" "github.com/apache/trafficcontrol/lib/go-util" validation "github.com/go-ozzo/ozzo-validation" "github.com/lib/pq" ) // These are the valid values for the Type property of a Profile. No other // values will be accepted, and these are not configurable. const ( CacheServerProfileType = "ATS_PROFILE" DeliveryServiceProfileType = "DS_PROFILE" ElasticSearchProfileType = "ES_PROFILE" GroveProfileType = "GROVE_PROFILE" InfluxdbProfileType = "INFLUXDB_PROFILE" KafkaProfileType = "KAFKA_PROFILE" LogstashProfileType = "LOGSTASH_PROFILE" OriginProfileType = "ORG_PROFILE" // RiakProfileType is the type of a Profile used on the legacy RiakKV system // which used to be used as a back-end for Traffic Vault. // // Deprecated: Support for Riak as a Traffic Vault back-end is being dropped // in the near future. Profiles of type UnknownProfileType should be used on // PostgreSQL database servers instead. RiakProfileType = "RIAK_PROFILE" SplunkProfileType = "SPLUNK_PROFILE" TrafficMonitorProfileType = "TM_PROFILE" TrafficPortalProfileType = "TP_PROFILE" TrafficRouterProfileType = "TR_PROFILE" TrafficStatsProfileType = "TS_PROFILE" UnkownProfileType = "UNK_PROFILE" ) // ProfilesResponse is a list of profiles returned by GET requests. type ProfilesResponse struct { Response []Profile `json:"response"` Alerts } // ProfileResponse is a single Profile Response for Update and Create to depict what changed // swagger:response ProfileResponse // in: body type ProfileResponse struct { // in: body Response Profile `json:"response"` Alerts } // A Profile represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. type Profile struct { ID int `json:"id" db:"id"` LastUpdated TimeNoMod `json:"lastUpdated"` Name string `json:"name"` Parameter string `json:"param"` Description string `json:"description"` CDNName string `json:"cdnName"` CDNID int `json:"cdn"` RoutingDisabled bool `json:"routingDisabled"` Type string `json:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfilesResponseV5 is a list of profiles returned by GET requests. type ProfilesResponseV5 struct { Response []ProfileV5 `json:"response"` Alerts } // A ProfileV5 represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. 
Note: Field LastUpdated represents RFC3339 type ProfileV5 struct { ID int `json:"id" db:"id"` LastUpdated time.Time `json:"lastUpdated" db:"last_updated"` Name string `json:"name" db:"name"` Description string `json:"description" db:"description"` CDNName string `json:"cdnName" db:"cdn_name"` CDNID int `json:"cdn" db:"cdn"` RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"` Type string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileNullable is exactly the same as Profile except that its fields are // reference values, so they may be nil. type ProfileNullable struct { ID *int `json:"id" db:"id"` LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"` Name *string `json:"name" db:"name"` Description *string `json:"description" db:"description"` CDNName *string `json:"cdnName" db:"cdn_name"` CDNID *int `json:"cdn" db:"cdn"` RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"` Type *string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileCopy contains details about the profile created from an existing profile. type ProfileCopy struct { ID int `json:"id"` Name string `json:"name"` ExistingID int `json:"idCopyFrom"` ExistingName string `json:"profileCopyFrom"` Description string `json:"description"` } // ProfileCopyResponse represents the Traffic Ops API's response when a Profile // is copied. type ProfileCopyResponse struct { Response ProfileCopy `json:"response"` Alerts } // ProfileExportImportNullable is an object of the form used by Traffic Ops // to represent exported and imported profiles. type ProfileExportImportNullable struct { Name *string `json:"name"` Description *string `json:"description"` CDNName *string `json:"cdn"` Type *string `json:"type"` } // ProfileExportResponse is an object of the form used by Traffic Ops // to represent exported profile response. type ProfileExportResponse struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` Alerts } // ProfileImportRequest is an object of the form used by Traffic Ops // to represent a request to import a profile. type ProfileImportRequest struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` } // ProfileImportResponse is an object of the form used by Traffic Ops // to represent a response from importing a profile. type ProfileImportResponse struct { Response ProfileImportResponseObj `json:"response"` Alerts } // ProfileImportResponseObj contains data about the profile being imported. type ProfileImportResponseObj struct { ProfileExportImportNullable ID *int `json:"id"` } // Validate validates an profile import request, implementing the // github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator // interface. 
func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error {
	profile := profileImport.Profile

	// Profile fields are valid
	errs := tovalidate.ToErrors(validation.Errors{
		"name": validation.Validate(profile.Name, validation.By(
			func(value interface{}) error {
				name, ok := value.(*string)
				if !ok {
					return fmt.Errorf("wrong type, need: string, got: %T", value)
				}
				if name == nil || *name == "" {
					return errors.New("required and cannot be blank")
				}
				if strings.Contains(*name, " ") {
					return errors.New("cannot contain spaces")
				}
				return nil
			},
		)),
		"description": validation.Validate(profile.Description, validation.Required),
		"cdnName":     validation.Validate(profile.CDNName, validation.Required),
		"type":        validation.Validate(profile.Type, validation.Required),
	})

	// Validate that the CDN exists
	if profile.CDNName != nil
// Validate profile does not already exist if profile.Name != nil { if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil { errString := fmt.Sprintf("checking profile name %v existence", *profile.Name) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if ok { errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name)) } } // Validate all parameters // export/import does not include secure flag // default value to not flag on validation secure := 1 for i, pp := range profileImport.Parameters { if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, pp.Value, &secure); len(ppErrs) > 0 { for _, err := range ppErrs { errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error())) } } } if len(errs) > 0 { return util.JoinErrs(errs) } return nil } // ProfilesExistByIDs returns whether profiles exist for all the given ids, and any error. // TODO move to helper package. func ProfilesExistByIDs(ids []int64, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = ANY($1)`, pq.Array(ids)).Scan(&count); err != nil { return false, errors.New("querying profiles existence from id: " + err.Error()) } return count == len(ids), nil } // ProfileExistsByID returns whether a profile with the given id exists, and any error. // TODO move to helper package. func ProfileExistsByID(id int64, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = $1`, id).Scan(&count); err != nil { return false, errors.New("querying profile existence from id: " + err.Error()) } return count > 0, nil } // ProfileExistsByName returns whether a profile with the given name exists, and any error. // TODO move to helper package. func ProfileExistsByName(name string, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where name = $1`, name).Scan(&count); err != nil { return false, errors.New("querying profile existence from name: " + err.Error()) } return count > 0, nil }
{ if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil { errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if !ok { errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName)) } }
conditional_block
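The conditional block recovered above guards the CDN existence check; just before it, Validate builds per-field rules with github.com/go-ozzo/ozzo-validation. Below is a reduced sketch of that validation.By pattern; the noSpaces rule and the sample input are hypothetical simplifications of the original name check.

// by_demo.go: a sketch of a custom ozzo-validation rule via validation.By.
package main

import (
	"errors"
	"fmt"
	"strings"

	validation "github.com/go-ozzo/ozzo-validation"
)

// noSpaces is a hypothetical rule rejecting values containing spaces.
func noSpaces(value interface{}) error {
	s, ok := value.(string)
	if !ok {
		return fmt.Errorf("wrong type, need: string, got: %T", value)
	}
	if strings.Contains(s, " ") {
		return errors.New("cannot contain spaces")
	}
	return nil
}

func main() {
	// validation.Errors maps field names to the result of each rule chain;
	// Filter drops nil entries and returns nil when nothing failed.
	err := validation.Errors{
		"name": validation.Validate("bad name", validation.Required, validation.By(noSpaces)),
	}.Filter()
	fmt.Println(err) // name: cannot contain spaces.
}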
profiles.go
package tc /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import ( "database/sql" "errors" "fmt" "strconv" "strings" "time" "github.com/apache/trafficcontrol/lib/go-log" "github.com/apache/trafficcontrol/lib/go-tc/tovalidate" "github.com/apache/trafficcontrol/lib/go-util" validation "github.com/go-ozzo/ozzo-validation" "github.com/lib/pq" ) // These are the valid values for the Type property of a Profile. No other // values will be accepted, and these are not configurable. const ( CacheServerProfileType = "ATS_PROFILE" DeliveryServiceProfileType = "DS_PROFILE" ElasticSearchProfileType = "ES_PROFILE" GroveProfileType = "GROVE_PROFILE" InfluxdbProfileType = "INFLUXDB_PROFILE" KafkaProfileType = "KAFKA_PROFILE" LogstashProfileType = "LOGSTASH_PROFILE" OriginProfileType = "ORG_PROFILE" // RiakProfileType is the type of a Profile used on the legacy RiakKV system // which used to be used as a back-end for Traffic Vault. // // Deprecated: Support for Riak as a Traffic Vault back-end is being dropped // in the near future. Profiles of type UnknownProfileType should be used on // PostgreSQL database servers instead. RiakProfileType = "RIAK_PROFILE" SplunkProfileType = "SPLUNK_PROFILE" TrafficMonitorProfileType = "TM_PROFILE" TrafficPortalProfileType = "TP_PROFILE" TrafficRouterProfileType = "TR_PROFILE" TrafficStatsProfileType = "TS_PROFILE" UnkownProfileType = "UNK_PROFILE" ) // ProfilesResponse is a list of profiles returned by GET requests. type ProfilesResponse struct { Response []Profile `json:"response"` Alerts } // ProfileResponse is a single Profile Response for Update and Create to depict what changed // swagger:response ProfileResponse // in: body type ProfileResponse struct { // in: body Response Profile `json:"response"` Alerts } // A Profile represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. type Profile struct { ID int `json:"id" db:"id"` LastUpdated TimeNoMod `json:"lastUpdated"` Name string `json:"name"` Parameter string `json:"param"` Description string `json:"description"` CDNName string `json:"cdnName"` CDNID int `json:"cdn"` RoutingDisabled bool `json:"routingDisabled"` Type string `json:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfilesResponseV5 is a list of profiles returned by GET requests. type ProfilesResponseV5 struct { Response []ProfileV5 `json:"response"` Alerts } // A ProfileV5 represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. 
Note: Field LastUpdated represents RFC3339 type ProfileV5 struct { ID int `json:"id" db:"id"` LastUpdated time.Time `json:"lastUpdated" db:"last_updated"` Name string `json:"name" db:"name"` Description string `json:"description" db:"description"` CDNName string `json:"cdnName" db:"cdn_name"` CDNID int `json:"cdn" db:"cdn"` RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"` Type string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileNullable is exactly the same as Profile except that its fields are // reference values, so they may be nil. type ProfileNullable struct { ID *int `json:"id" db:"id"` LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"` Name *string `json:"name" db:"name"` Description *string `json:"description" db:"description"` CDNName *string `json:"cdnName" db:"cdn_name"` CDNID *int `json:"cdn" db:"cdn"` RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"` Type *string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileCopy contains details about the profile created from an existing profile. type ProfileCopy struct {
} // ProfileCopyResponse represents the Traffic Ops API's response when a Profile // is copied. type ProfileCopyResponse struct { Response ProfileCopy `json:"response"` Alerts } // ProfileExportImportNullable is an object of the form used by Traffic Ops // to represent exported and imported profiles. type ProfileExportImportNullable struct { Name *string `json:"name"` Description *string `json:"description"` CDNName *string `json:"cdn"` Type *string `json:"type"` } // ProfileExportResponse is an object of the form used by Traffic Ops // to represent exported profile response. type ProfileExportResponse struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` Alerts } // ProfileImportRequest is an object of the form used by Traffic Ops // to represent a request to import a profile. type ProfileImportRequest struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` } // ProfileImportResponse is an object of the form used by Traffic Ops // to represent a response from importing a profile. type ProfileImportResponse struct { Response ProfileImportResponseObj `json:"response"` Alerts } // ProfileImportResponseObj contains data about the profile being imported. type ProfileImportResponseObj struct { ProfileExportImportNullable ID *int `json:"id"` } // Validate validates an profile import request, implementing the // github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator // interface. func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error { profile := profileImport.Profile // Profile fields are valid errs := tovalidate.ToErrors(validation.Errors{ "name": validation.Validate(profile.Name, validation.By( func(value interface{}) error { name, ok := value.(*string) if !ok { return fmt.Errorf("wrong type, need: string, got: %T", value) } if name == nil || *name == "" { return errors.New("required and cannot be blank") } if strings.Contains(*name, " ") { return errors.New("cannot contain spaces") } return nil }, )), "description": validation.Validate(profile.Description, validation.Required), "cdnName": validation.Validate(profile.CDNName, validation.Required), "type": validation.Validate(profile.Type, validation.Required), }) // Validate CDN exist if profile.CDNName != nil { if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil { errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if !ok { errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName)) } } // Validate profile does not already exist if profile.Name != nil { if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil { errString := fmt.Sprintf("checking profile name %v existence", *profile.Name) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if ok { errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name)) } } // Validate all parameters // export/import does not include secure flag // default value to not flag on validation secure := 1 for i, pp := range profileImport.Parameters { if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, 
pp.Value, &secure); len(ppErrs) > 0 { for _, err := range ppErrs { errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error())) } } } if len(errs) > 0 { return util.JoinErrs(errs) } return nil } // ProfilesExistByIDs returns whether profiles exist for all the given ids, and any error. // TODO move to helper package. func ProfilesExistByIDs(ids []int64, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = ANY($1)`, pq.Array(ids)).Scan(&count); err != nil { return false, errors.New("querying profiles existence from id: " + err.Error()) } return count == len(ids), nil } // ProfileExistsByID returns whether a profile with the given id exists, and any error. // TODO move to helper package. func ProfileExistsByID(id int64, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = $1`, id).Scan(&count); err != nil { return false, errors.New("querying profile existence from id: " + err.Error()) } return count > 0, nil } // ProfileExistsByName returns whether a profile with the given name exists, and any error. // TODO move to helper package. func ProfileExistsByName(name string, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where name = $1`, name).Scan(&count); err != nil { return false, errors.New("querying profile existence from name: " + err.Error()) } return count > 0, nil }
ID int `json:"id"` Name string `json:"name"` ExistingID int `json:"idCopyFrom"` ExistingName string `json:"profileCopyFrom"` Description string `json:"description"`
random_line_split
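ProfilesExistByIDs in the surrounding file batches an existence check for many IDs into one round trip. The sketch below restates that idiom: pq.Array adapts a Go slice to a Postgres array so a single query can match every ID with "= ANY($1)". The table name, query shape, and pq.Array call mirror the original; the package and function wrapper are illustrative.

// Package profilecheck is a hypothetical home for this sketch.
package profilecheck

import (
	"database/sql"
	"errors"

	"github.com/lib/pq"
)

// allProfilesExist reports whether every given ID matches a profile row.
func allProfilesExist(ids []int64, tx *sql.Tx) (bool, error) {
	count := 0
	if err := tx.QueryRow(`SELECT count(*) FROM profile WHERE id = ANY($1)`, pq.Array(ids)).Scan(&count); err != nil {
		return false, errors.New("querying profiles existence from id: " + err.Error())
	}
	// Assumes ids contains no duplicates; a duplicated ID would make the
	// count-versus-length comparison unreliable.
	return count == len(ids), nil
}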
profiles.go
package tc /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import ( "database/sql" "errors" "fmt" "strconv" "strings" "time" "github.com/apache/trafficcontrol/lib/go-log" "github.com/apache/trafficcontrol/lib/go-tc/tovalidate" "github.com/apache/trafficcontrol/lib/go-util" validation "github.com/go-ozzo/ozzo-validation" "github.com/lib/pq" ) // These are the valid values for the Type property of a Profile. No other // values will be accepted, and these are not configurable. const ( CacheServerProfileType = "ATS_PROFILE" DeliveryServiceProfileType = "DS_PROFILE" ElasticSearchProfileType = "ES_PROFILE" GroveProfileType = "GROVE_PROFILE" InfluxdbProfileType = "INFLUXDB_PROFILE" KafkaProfileType = "KAFKA_PROFILE" LogstashProfileType = "LOGSTASH_PROFILE" OriginProfileType = "ORG_PROFILE" // RiakProfileType is the type of a Profile used on the legacy RiakKV system // which used to be used as a back-end for Traffic Vault. // // Deprecated: Support for Riak as a Traffic Vault back-end is being dropped // in the near future. Profiles of type UnknownProfileType should be used on // PostgreSQL database servers instead. RiakProfileType = "RIAK_PROFILE" SplunkProfileType = "SPLUNK_PROFILE" TrafficMonitorProfileType = "TM_PROFILE" TrafficPortalProfileType = "TP_PROFILE" TrafficRouterProfileType = "TR_PROFILE" TrafficStatsProfileType = "TS_PROFILE" UnkownProfileType = "UNK_PROFILE" ) // ProfilesResponse is a list of profiles returned by GET requests. type ProfilesResponse struct { Response []Profile `json:"response"` Alerts } // ProfileResponse is a single Profile Response for Update and Create to depict what changed // swagger:response ProfileResponse // in: body type ProfileResponse struct { // in: body Response Profile `json:"response"` Alerts } // A Profile represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. type Profile struct { ID int `json:"id" db:"id"` LastUpdated TimeNoMod `json:"lastUpdated"` Name string `json:"name"` Parameter string `json:"param"` Description string `json:"description"` CDNName string `json:"cdnName"` CDNID int `json:"cdn"` RoutingDisabled bool `json:"routingDisabled"` Type string `json:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfilesResponseV5 is a list of profiles returned by GET requests. type ProfilesResponseV5 struct { Response []ProfileV5 `json:"response"` Alerts } // A ProfileV5 represents a set of configuration for a server or Delivery Service // which may be reused to allow sharing configuration across the objects to // which it is assigned. 
Note: Field LastUpdated represents RFC3339 type ProfileV5 struct { ID int `json:"id" db:"id"` LastUpdated time.Time `json:"lastUpdated" db:"last_updated"` Name string `json:"name" db:"name"` Description string `json:"description" db:"description"` CDNName string `json:"cdnName" db:"cdn_name"` CDNID int `json:"cdn" db:"cdn"` RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"` Type string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileNullable is exactly the same as Profile except that its fields are // reference values, so they may be nil. type ProfileNullable struct { ID *int `json:"id" db:"id"` LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"` Name *string `json:"name" db:"name"` Description *string `json:"description" db:"description"` CDNName *string `json:"cdnName" db:"cdn_name"` CDNID *int `json:"cdn" db:"cdn"` RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"` Type *string `json:"type" db:"type"` Parameters []ParameterNullable `json:"params,omitempty"` } // ProfileCopy contains details about the profile created from an existing profile. type ProfileCopy struct { ID int `json:"id"` Name string `json:"name"` ExistingID int `json:"idCopyFrom"` ExistingName string `json:"profileCopyFrom"` Description string `json:"description"` } // ProfileCopyResponse represents the Traffic Ops API's response when a Profile // is copied. type ProfileCopyResponse struct { Response ProfileCopy `json:"response"` Alerts } // ProfileExportImportNullable is an object of the form used by Traffic Ops // to represent exported and imported profiles. type ProfileExportImportNullable struct { Name *string `json:"name"` Description *string `json:"description"` CDNName *string `json:"cdn"` Type *string `json:"type"` } // ProfileExportResponse is an object of the form used by Traffic Ops // to represent exported profile response. type ProfileExportResponse struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` Alerts } // ProfileImportRequest is an object of the form used by Traffic Ops // to represent a request to import a profile. type ProfileImportRequest struct { // Parameters associated to the profile // Profile ProfileExportImportNullable `json:"profile"` // Parameters associated to the profile // Parameters []ProfileExportImportParameterNullable `json:"parameters"` } // ProfileImportResponse is an object of the form used by Traffic Ops // to represent a response from importing a profile. type ProfileImportResponse struct { Response ProfileImportResponseObj `json:"response"` Alerts } // ProfileImportResponseObj contains data about the profile being imported. type ProfileImportResponseObj struct { ProfileExportImportNullable ID *int `json:"id"` } // Validate validates an profile import request, implementing the // github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator // interface. 
func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error { profile := profileImport.Profile // Profile fields are valid errs := tovalidate.ToErrors(validation.Errors{ "name": validation.Validate(profile.Name, validation.By( func(value interface{}) error { name, ok := value.(*string) if !ok { return fmt.Errorf("wrong type, need: string, got: %T", value) } if name == nil || *name == "" { return errors.New("required and cannot be blank") } if strings.Contains(*name, " ") { return errors.New("cannot contain spaces") } return nil }, )), "description": validation.Validate(profile.Description, validation.Required), "cdnName": validation.Validate(profile.CDNName, validation.Required), "type": validation.Validate(profile.Type, validation.Required), }) // Validate CDN exist if profile.CDNName != nil { if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil { errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if !ok { errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName)) } } // Validate profile does not already exist if profile.Name != nil { if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil { errString := fmt.Sprintf("checking profile name %v existence", *profile.Name) log.Errorf("%v: %v", errString, err.Error()) errs = append(errs, errors.New(errString)) } else if ok { errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name)) } } // Validate all parameters // export/import does not include secure flag // default value to not flag on validation secure := 1 for i, pp := range profileImport.Parameters { if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, pp.Value, &secure); len(ppErrs) > 0 { for _, err := range ppErrs { errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error())) } } } if len(errs) > 0 { return util.JoinErrs(errs) } return nil } // ProfilesExistByIDs returns whether profiles exist for all the given ids, and any error. // TODO move to helper package. func
(ids []int64, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = ANY($1)`, pq.Array(ids)).Scan(&count); err != nil { return false, errors.New("querying profiles existence from id: " + err.Error()) } return count == len(ids), nil } // ProfileExistsByID returns whether a profile with the given id exists, and any error. // TODO move to helper package. func ProfileExistsByID(id int64, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where id = $1`, id).Scan(&count); err != nil { return false, errors.New("querying profile existence from id: " + err.Error()) } return count > 0, nil } // ProfileExistsByName returns whether a profile with the given name exists, and any error. // TODO move to helper package. func ProfileExistsByName(name string, tx *sql.Tx) (bool, error) { count := 0 if err := tx.QueryRow(`SELECT count(*) from profile where name = $1`, name).Scan(&count); err != nil { return false, errors.New("querying profile existence from name: " + err.Error()) } return count > 0, nil }
ProfilesExistByIDs
identifier_name
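The Validate method in these examples accumulates per-field errors into a slice and collapses them with util.JoinErrs. Below is a sketch of that accumulate-then-join shape using the standard library's errors.Join (Go 1.20+) as a stand-in for that helper; the field checks themselves are hypothetical.

// join_demo.go: accumulating validation errors and joining them at the end.
package main

import (
	"errors"
	"fmt"
)

// validate collects every failure rather than stopping at the first one,
// so the caller sees all problems in a single error value.
func validate(name, description string) error {
	var errs []error
	if name == "" {
		errs = append(errs, errors.New("name: required and cannot be blank"))
	}
	if description == "" {
		errs = append(errs, errors.New("description: cannot be blank"))
	}
	// errors.Join returns nil when errs is empty.
	return errors.Join(errs...)
}

func main() {
	fmt.Println(validate("", "")) // prints both messages, one per line
}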
lib.rs
//! Securely zero memory with a simple trait ([Zeroize]) built on stable Rust //! primitives which guarantee the operation will not be 'optimized away'. //! //! ## Usage //! //! ``` //! use zeroize::Zeroize; //! //! fn main() { //! // Protip: don't embed secrets in your source code. //! // This is just an example. //! let mut secret = b"Air shield password: 1,2,3,4,5".to_vec(); //! // [ ... ] open the air shield here //! //! // Now that we're done using the secret, zero it out. //! secret.zeroize(); //! } //! ``` //! //! The [Zeroize] trait is impl'd on all of Rust's core scalar types including //! integers, floats, `bool`, and `char`. //! //! Additionally, it's implemented on slices and `IterMut`s of the above types. //! //! When the `std` feature is enabled (which it is by default), it's also impl'd //! for `Vec`s of the above types as well as `String`, where it provides //! [Vec::clear()] / [String::clear()]-like behavior (truncating to zero-length) //! but ensures the backing memory is securely zeroed. //! //! The [DefaultIsZeroes] marker trait can be impl'd on types which also //! impl [Default], which implements [Zeroize] by overwriting a value with //! the default value. //! //! ## Custom Derive Support //! //! This crate has custom derive support for the `Zeroize` trait, which //! automatically calls `zeroize()` on all members of a struct or tuple struct: //! //! ``` //! // Ensure you import the crate with `macro_use`: //! // #[macro_use] //! // extern crate zeroize; //! //! use zeroize::Zeroize; //! //! #[derive(Zeroize)] //! struct MyStruct([u8; 64]); //! ``` //! //! Additionally, you can derive `ZeroizeOnDrop`, which will automatically //! derive a `Drop` handler that calls `zeroize()`: //! //! ``` //! use zeroize::{Zeroize, ZeroizeOnDrop}; //! //! // This struct will be zeroized on drop //! #[derive(Zeroize, ZeroizeOnDrop)] //! struct MyStruct([u8; 64]); //! ``` //! //! ## About //! //! [Zeroing memory securely is hard] - compilers optimize for performance, and //! in doing so they love to "optimize away" unnecessary zeroing calls. There are //! many documented "tricks" to attempt to avoid these optimizations and ensure //! that a zeroing routine is performed reliably. //! //! This crate isn't about tricks: it uses [core::ptr::write_volatile] //! and [core::sync::atomic] memory fences to provide easy-to-use, portable //! zeroing behavior which works on all of Rust's core number types and slices //! thereof, implemented in pure Rust with no usage of FFI or assembly. //! //! - **No insecure fallbacks!** //! - **No dependencies!** //! - **No FFI or inline assembly!** //! - `#![no_std]` **i.e. embedded-friendly**! //! - **No functionality besides securely zeroing memory!** //!
//! 2. Ensure all subsequent reads to the memory following the zeroing operation //! will always see zeroes. //! //! This crate guarantees #1 is true: LLVM's volatile semantics ensure it. //! //! The story around #2 is much more complicated. In brief, it should be true that //! LLVM's current implementation does not attempt to perform optimizations which //! would allow a subsequent (non-volatile) read to see the original value prior //! to zeroization. However, this is not a guarantee, but rather an LLVM //! implementation detail. //! //! For more background, we can look to the [core::ptr::write_volatile] //! documentation: //! //! > Volatile operations are intended to act on I/O memory, and are guaranteed //! > to not be elided or reordered by the compiler across other volatile //! > operations. //! > //! > Memory accessed with `read_volatile` or `write_volatile` should not be //! > accessed with non-volatile operations. //! //! Uhoh! This crate does not guarantee all reads to the memory it operates on //! are volatile, and the documentation for [core::ptr::write_volatile] //! explicitly warns against mixing volatile and non-volatile operations. //! Perhaps we'd be better off with something like a `VolatileCell` //! type which owns the associated data and ensures all reads and writes are //! volatile so we don't have to worry about the semantics of mixing volatile and //! non-volatile accesses. //! //! While that's a strategy worth pursuing (and something we may investigate //! separately from this crate), it comes with some onerous API requirements: //! it means any data that we might ever desire to zero is owned by a //! `VolatileCell`. However, this does not make it possible for this crate //! to act on references, which severely limits its applicability. In fact //! a `VolatileCell` can only act on values, i.e. to read a value from it, //! we'd need to make a copy of it, and that's literally the opposite of //! what we want. //! //! It's worth asking what the precise semantics of mixing volatile and //! non-volatile reads actually are, and whether a less obtrusive API which //! can act entirely on mutable references is possible, safe, and provides the //! desired behavior. //! //! Unfortunately, that's a tricky question, because //! [Rust does not have a formally defined memory model][memory-model], //! and the behavior of mixing volatile and non-volatile memory accesses is //! therefore not rigorously specified and winds up being an LLVM //! implementation detail. The semantics were discussed extensively in this //! thread, specifically in the context of zeroing secrets from memory: //! //! <https://internals.rust-lang.org/t/volatile-and-sensitive-memory/3188/24> //! //! Some notable details from this thread: //! //! - Rust/LLVM's notion of "volatile" is centered around data *accesses*, not //! the data itself. Specifically it maps to flags in LLVM IR which control //! the behavior of the optimizer, and is therefore a bit different from the //! typical C notion of "volatile". //! - As mentioned earlier, LLVM does not presently contain optimizations which //! would reorder a non-volatile read to occur before a volatile write. //! However, there is nothing precluding such optimizations from being added. //! LLVM presently appears to exhibit the desired behavior for both points //! #1 and #2 above, but there is nothing preventing future versions of Rust //! and/or LLVM from changing that. //! //! To help mitigate concerns about reordering potentially exposing secrets //! 
after they have been zeroed, this crate leverages the [core::sync::atomic] //! memory fence functions including [compiler_fence] and [fence] (which uses //! the CPU's native fence instructions). These fences are leveraged with the //! strictest ordering guarantees, [Ordering::SeqCst], which ensures no //! accesses are reordered. Without a formally defined memory model we can't //! guarantee these will be effective, but we hope they will cover most cases. //! //! Concretely the threat of leaking "zeroized" secrets (via reordering by //! LLVM and/or the CPU via out-of-order or speculative execution) would //! require a non-volatile access to be reordered ahead of the following: //! //! 1. before an [Ordering::SeqCst] compiler fence //! 2. before an [Ordering::SeqCst] runtime fence //! 3. before a volatile write //! //! This seems unlikely, but our usage of mixed non-volatile and volatile //! accesses is technically undefined behavior, at least until guarantees //! about this particular mixture of operations is formally defined in a //! Rust memory model. //! //! Furthermore, given the recent history of microarchitectural attacks //! (Spectre, Meltdown, etc), there is also potential for "zeroized" secrets //! to be leaked through covert channels (e.g. memory fences have been used //! as a covert channel), so we are wary to make guarantees unless they can //! be made firmly in terms of both a formal Rust memory model and the //! generated code for a particular CPU architecture. //! //! In conclusion, this crate guarantees the zeroize operation will not be //! elided or "optimized away", makes a "best effort" to ensure that //! memory accesses will not be reordered ahead of the "zeroize" operation, //! but **cannot** yet guarantee that such reordering will not occur. //! //! ## Stack/Heap Zeroing Notes //! //! This crate can be used to zero values from either the stack or the heap. //! //! However, be aware that Rust's current memory semantics (e.g. `Copy` types) //! can leave copies of data in memory, and there isn't presently a good solution //! for ensuring all copies of data on the stack are properly cleared. //! //! The [`Pin` RFC][pin] proposes a method for avoiding this. //! //! ## What about: clearing registers, mlock, mprotect, etc? //! //! This crate is laser-focused on being a simple, unobtrusive crate for zeroing //! memory in as reliable a manner as is possible on stable Rust. //! //! Clearing registers is a difficult problem that can't easily be solved by //! something like a crate, and requires either inline ASM or rustc support. //! See <https://github.com/rust-lang/rust/issues/17046> for background on //! this particular problem. //! //! Other memory protection mechanisms are interesting and useful, but often //! overkill (e.g. defending against RAM scraping or attackers with swap access). //! In as much as there may be merit to these approaches, there are also many //! other crates that already implement more sophisticated memory protections. //! Such protections are explicitly out-of-scope for this crate. //! //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote //! it in the most unobtrusive manner possible. This includes omitting complex //! `unsafe` memory protection systems and just trying to make the best memory //! zeroing crate available. //! //! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html //! 
[Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear //! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear //! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html //! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html //! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html //! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html //! [Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst //! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html //! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html //! [memory-model]: https://github.com/nikomatsakis/rust-memory-model //! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md //! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data #![no_std] #![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)] #![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))] #![cfg_attr(feature = "nightly", feature(core_intrinsics))] #![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")] #[cfg(any(feature = "std", test))] #[cfg_attr(test, macro_use)] extern crate std; #[cfg(feature = "zeroize_derive")] #[allow(unused_imports)] #[macro_use] extern crate zeroize_derive; #[cfg(feature = "zeroize_derive")] #[doc(hidden)] pub use zeroize_derive::*; use core::{ptr, slice::IterMut, sync::atomic}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::prelude::*; #[cfg(feature = "std")] use std::prelude::v1::*; /// Trait for securely erasing types from memory pub trait Zeroize { /// Zero out this object from memory (using Rust or OS intrinsics which /// ensure the zeroization operation is not "optimized away") fn zeroize(&mut self); } /// Marker trait for types whose `Default` is the desired zeroization result pub trait DefaultIsZeroes: Copy + Default + Sized {} /// Marker trait intended for use with `zeroize_derive` which indicates that /// a type should have a drop handler which calls Zeroize. /// /// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an /// associated drop handler. pub trait ZeroizeOnDrop: Zeroize + Drop {} impl<Z> Zeroize for Z where Z: DefaultIsZeroes, { fn zeroize(&mut self) { volatile_set(self, Z::default()); atomic_fence(); } } macro_rules! impl_zeroize_with_default { ($($type:ty),+) => { $(impl DefaultIsZeroes for $type {})+ }; } impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize); impl_zeroize_with_default!(u16, u32, u64, u128, usize); impl_zeroize_with_default!(f32, f64, char, bool); /// On non-nightly targets, avoid special-casing u8 #[cfg(not(feature = "nightly"))] impl_zeroize_with_default!(u8); /// On nightly targets, don't implement `DefaultIsZeroes` so we can special /// case using batch set operations. #[cfg(feature = "nightly")] impl Zeroize for u8 { fn zeroize(&mut self) { volatile_set(self, 0); atomic_fence(); } } impl<'a, Z> Zeroize for IterMut<'a, Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { let default = Z::default(); for elem in self { volatile_set(elem, default); } atomic_fence(); } } /// Implement zeroize on all types that can be zeroized with the zero value impl<Z> Zeroize for [Z] where Z: DefaultIsZeroes, { fn zeroize(&mut self) { // TODO: batch volatile set operation? 
self.iter_mut().zeroize(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing #[cfg(feature = "nightly")] impl Zeroize for [u8] { fn zeroize(&mut self) { volatile_zero_bytes(self); atomic_fence(); } } #[cfg(feature = "alloc")] impl<Z> Zeroize for Vec<Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { self.resize(self.capacity(), Default::default()); self.as_mut_slice().zeroize(); self.clear(); } } #[cfg(feature = "alloc")] impl Zeroize for String { fn zeroize(&mut self) { unsafe { self.as_bytes_mut() }.zeroize(); debug_assert!(self.as_bytes().iter().all(|b| *b == 0)); self.clear(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing #[cfg(feature = "nightly")] macro_rules! impl_zeroize_for_byte_array { ($($size:expr),+) => { $( impl Zeroize for [u8; $size] { fn zeroize(&mut self) { volatile_zero_bytes(self.as_mut()); atomic_fence(); } } )+ }; } #[cfg(feature = "nightly")] impl_zeroize_for_byte_array!( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 ); /// Use fences to prevent accesses from being reordered before this /// point, which should hopefully help ensure that all accessors /// see zeroes after this point. #[inline] fn atomic_fence() { atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); } /// Set a mutable reference to a value to the given replacement #[inline] fn volatile_set<T: Copy + Sized>(dst: &mut T, src: T) { unsafe { ptr::write_volatile(dst, src) } } #[cfg(feature = "nightly")] #[inline] fn volatile_zero_bytes(dst: &mut [u8]) { unsafe { core::intrinsics::volatile_set_memory(dst.as_mut_ptr(), 0, dst.len()) } } #[cfg(test)] mod tests { use super::Zeroize; use std::prelude::v1::*; #[test] fn zeroize_byte_arrays() { let mut arr = [42u8; 64]; arr.zeroize(); assert_eq!(arr.as_ref(), [0u8; 64].as_ref()); } #[test] fn zeroize_vec() { let mut vec = vec![42; 3]; vec.zeroize(); assert!(vec.is_empty()); } #[test] fn zeroize_vec_past_len() { let mut vec = Vec::with_capacity(5); for i in 0..4 { vec.push(10 + i); } vec.clear(); // safe if: new_len <= capacity AND elements "were initialised" unsafe { vec.set_len(1); } assert_eq!(10, vec[0], "clear() hasn't erased our push()es"); vec.clear(); vec.zeroize(); unsafe { vec.set_len(4); } for i in 0..4 { assert_eq!(0, vec[i], "it's been zero'd"); } } #[test] fn zeroize_string() { let mut string = String::from("Hello, world!"); string.zeroize(); assert!(string.is_empty()); } #[test] fn zeroize_box() { let mut boxed_arr = Box::new([42u8; 3]); boxed_arr.zeroize(); assert_eq!(boxed_arr.as_ref(), &[0u8; 3]); } #[cfg(feature = "zeroize_derive")] mod derive { use super::*; #[derive(Zeroize)] struct ZeroizableTupleStruct([u8; 3]); #[test] fn derive_tuple_struct_test() { let mut value = ZeroizableTupleStruct([1, 2, 3]); value.zeroize(); assert_eq!(&value.0, &[0, 0, 0]) } #[derive(Zeroize)] struct ZeroizableStruct { string: String, vec: Vec<u8>, bytearray: [u8; 3], number: usize, boolean: bool, } #[test] fn derive_struct_test() { let mut value = ZeroizableStruct { string: "Hello, world!".to_owned(), vec: vec![1, 2, 3], bytearray: [4, 5, 6], number: 42, boolean: true, }; value.zeroize(); assert!(value.string.is_empty()); assert!(value.vec.is_empty()); assert_eq!(&value.bytearray, &[0, 0, 0]); assert_eq!(value.number, 0); assert!(!value.boolean); } } }
//! ## What guarantees does this crate provide? //! //! Ideally a secure memory-zeroing function would guarantee the following: //! //! 1. Ensure the zeroing operation can't be "optimized away" by the compiler.
random_line_split
lib.rs
//! Securely zero memory with a simple trait ([Zeroize]) built on stable Rust //! primitives which guarantee the operation will not be 'optimized away'. //! //! ## Usage //! //! ``` //! use zeroize::Zeroize; //! //! fn main() { //! // Protip: don't embed secrets in your source code. //! // This is just an example. //! let mut secret = b"Air shield password: 1,2,3,4,5".to_vec(); //! // [ ... ] open the air shield here //! //! // Now that we're done using the secret, zero it out. //! secret.zeroize(); //! } //! ``` //! //! The [Zeroize] trait is impl'd on all of Rust's core scalar types including //! integers, floats, `bool`, and `char`. //! //! Additionally, it's implemented on slices and `IterMut`s of the above types. //! //! When the `std` feature is enabled (which it is by default), it's also impl'd //! for `Vec`s of the above types as well as `String`, where it provides //! [Vec::clear()] / [String::clear()]-like behavior (truncating to zero-length) //! but ensures the backing memory is securely zeroed. //! //! The [DefaultIsZeroes] marker trait can be impl'd on types which also //! impl [Default], which implements [Zeroize] by overwriting a value with //! the default value. //! //! ## Custom Derive Support //! //! This crate has custom derive support for the `Zeroize` trait, which //! automatically calls `zeroize()` on all members of a struct or tuple struct: //! //! ``` //! // Ensure you import the crate with `macro_use`: //! // #[macro_use] //! // extern crate zeroize; //! //! use zeroize::Zeroize; //! //! #[derive(Zeroize)] //! struct MyStruct([u8; 64]); //! ``` //! //! Additionally, you can derive `ZeroizeOnDrop`, which will automatically //! derive a `Drop` handler that calls `zeroize()`: //! //! ``` //! use zeroize::{Zeroize, ZeroizeOnDrop}; //! //! // This struct will be zeroized on drop //! #[derive(Zeroize, ZeroizeOnDrop)] //! struct MyStruct([u8; 64]); //! ``` //! //! ## About //! //! [Zeroing memory securely is hard] - compilers optimize for performance, and //! in doing so they love to "optimize away" unnecessary zeroing calls. There are //! many documented "tricks" to attempt to avoid these optimizations and ensure //! that a zeroing routine is performed reliably. //! //! This crate isn't about tricks: it uses [core::ptr::write_volatile] //! and [core::sync::atomic] memory fences to provide easy-to-use, portable //! zeroing behavior which works on all of Rust's core number types and slices //! thereof, implemented in pure Rust with no usage of FFI or assembly. //! //! - **No insecure fallbacks!** //! - **No dependencies!** //! - **No FFI or inline assembly!** //! - `#![no_std]` **i.e. embedded-friendly**! //! - **No functionality besides securely zeroing memory!** //! //! ## What guarantees does this crate provide? //! //! Ideally a secure memory-zeroing function would guarantee the following: //! //! 1. Ensure the zeroing operation can't be "optimized away" by the compiler. //! 2. Ensure all subsequent reads to the memory following the zeroing operation //! will always see zeroes. //! //! This crate guarantees #1 is true: LLVM's volatile semantics ensure it. //! //! The story around #2 is much more complicated. In brief, it should be true that //! LLVM's current implementation does not attempt to perform optimizations which //! would allow a subsequent (non-volatile) read to see the original value prior //! to zeroization. However, this is not a guarantee, but rather an LLVM //! implementation detail. //! //! 
For more background, we can look to the [core::ptr::write_volatile] //! documentation: //! //! > Volatile operations are intended to act on I/O memory, and are guaranteed //! > to not be elided or reordered by the compiler across other volatile //! > operations. //! > //! > Memory accessed with `read_volatile` or `write_volatile` should not be //! > accessed with non-volatile operations. //! //! Uhoh! This crate does not guarantee all reads to the memory it operates on //! are volatile, and the documentation for [core::ptr::write_volatile] //! explicitly warns against mixing volatile and non-volatile operations. //! Perhaps we'd be better off with something like a `VolatileCell` //! type which owns the associated data and ensures all reads and writes are //! volatile so we don't have to worry about the semantics of mixing volatile and //! non-volatile accesses. //! //! While that's a strategy worth pursuing (and something we may investigate //! separately from this crate), it comes with some onerous API requirements: //! it means any data that we might ever desire to zero is owned by a //! `VolatileCell`. However, this does not make it possible for this crate //! to act on references, which severely limits its applicability. In fact //! a `VolatileCell` can only act on values, i.e. to read a value from it, //! we'd need to make a copy of it, and that's literally the opposite of //! what we want. //! //! It's worth asking what the precise semantics of mixing volatile and //! non-volatile reads actually are, and whether a less obtrusive API which //! can act entirely on mutable references is possible, safe, and provides the //! desired behavior. //! //! Unfortunately, that's a tricky question, because //! [Rust does not have a formally defined memory model][memory-model], //! and the behavior of mixing volatile and non-volatile memory accesses is //! therefore not rigorously specified and winds up being an LLVM //! implementation detail. The semantics were discussed extensively in this //! thread, specifically in the context of zeroing secrets from memory: //! //! <https://internals.rust-lang.org/t/volatile-and-sensitive-memory/3188/24> //! //! Some notable details from this thread: //! //! - Rust/LLVM's notion of "volatile" is centered around data *accesses*, not //! the data itself. Specifically it maps to flags in LLVM IR which control //! the behavior of the optimizer, and is therefore a bit different from the //! typical C notion of "volatile". //! - As mentioned earlier, LLVM does not presently contain optimizations which //! would reorder a non-volatile read to occur before a volatile write. //! However, there is nothing precluding such optimizations from being added. //! LLVM presently appears to exhibit the desired behavior for both points //! #1 and #2 above, but there is nothing preventing future versions of Rust //! and/or LLVM from changing that. //! //! To help mitigate concerns about reordering potentially exposing secrets //! after they have been zeroed, this crate leverages the [core::sync::atomic] //! memory fence functions including [compiler_fence] and [fence] (which uses //! the CPU's native fence instructions). These fences are leveraged with the //! strictest ordering guarantees, [Ordering::SeqCst], which ensures no //! accesses are reordered. Without a formally defined memory model we can't //! guarantee these will be effective, but we hope they will cover most cases. //! //! Concretely the threat of leaking "zeroized" secrets (via reordering by //! 
LLVM and/or the CPU via out-of-order or speculative execution) would //! require a non-volatile access to be reordered ahead of all of the following: //! //! 1. an [Ordering::SeqCst] compiler fence //! 2. an [Ordering::SeqCst] runtime fence //! 3. a volatile write //! //! This seems unlikely, but our usage of mixed non-volatile and volatile //! accesses is technically undefined behavior, at least until the guarantees //! about this particular mixture of operations are formally defined in a //! Rust memory model. //! //! Furthermore, given the recent history of microarchitectural attacks //! (Spectre, Meltdown, etc.), there is also potential for "zeroized" secrets //! to be leaked through covert channels (e.g. memory fences have been used //! as a covert channel), so we are wary of making guarantees unless they can //! be made firmly in terms of both a formal Rust memory model and the //! generated code for a particular CPU architecture. //! //! In conclusion, this crate guarantees the zeroize operation will not be //! elided or "optimized away", makes a "best effort" to ensure that //! memory accesses will not be reordered ahead of the "zeroize" operation, //! but **cannot** yet guarantee that such reordering will not occur. //! //! ## Stack/Heap Zeroing Notes //! //! This crate can be used to zero values from either the stack or the heap. //! //! However, be aware that Rust's current memory semantics (e.g. `Copy` types) //! can leave copies of data in memory, and there isn't presently a good solution //! for ensuring all copies of data on the stack are properly cleared. //! //! The [`Pin` RFC][pin] proposes a method for avoiding this. //! //! ## What about: clearing registers, mlock, mprotect, etc.? //! //! This crate is laser-focused on being a simple, unobtrusive crate for zeroing //! memory in as reliable a manner as is possible on stable Rust. //! //! Clearing registers is a difficult problem that can't easily be solved by //! something like a crate, and requires either inline ASM or rustc support. //! See <https://github.com/rust-lang/rust/issues/17046> for background on //! this particular problem. //! //! Other memory protection mechanisms are interesting and useful, but often //! overkill (e.g. defending against RAM scraping or attackers with swap access). //! Inasmuch as there may be merit to these approaches, there are also many //! other crates that already implement more sophisticated memory protections. //! Such protections are explicitly out-of-scope for this crate. //! //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote //! it in the most unobtrusive manner possible. This includes omitting complex //! `unsafe` memory protection systems and just trying to make the best memory //! zeroing crate available. //! //! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html //! [Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear //! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear //! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html //! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html //! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html //! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html //!
[Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst //! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html //! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html //! [memory-model]: https://github.com/nikomatsakis/rust-memory-model //! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md //! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data #![no_std] #![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)] #![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))] #![cfg_attr(feature = "nightly", feature(core_intrinsics))] #![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")] #[cfg(any(feature = "std", test))] #[cfg_attr(test, macro_use)] extern crate std; #[cfg(feature = "zeroize_derive")] #[allow(unused_imports)] #[macro_use] extern crate zeroize_derive; #[cfg(feature = "zeroize_derive")] #[doc(hidden)] pub use zeroize_derive::*; use core::{ptr, slice::IterMut, sync::atomic}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::prelude::*; #[cfg(feature = "std")] use std::prelude::v1::*; /// Trait for securely erasing types from memory pub trait Zeroize { /// Zero out this object from memory (using Rust or OS intrinsics which /// ensure the zeroization operation is not "optimized away") fn zeroize(&mut self); } /// Marker trait for types whose `Default` is the desired zeroization result pub trait DefaultIsZeroes: Copy + Default + Sized {} /// Marker trait intended for use with `zeroize_derive` which indicates that /// a type should have a drop handler which calls Zeroize. /// /// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an /// associated drop handler. pub trait ZeroizeOnDrop: Zeroize + Drop {} impl<Z> Zeroize for Z where Z: DefaultIsZeroes, { fn zeroize(&mut self) { volatile_set(self, Z::default()); atomic_fence(); } } macro_rules! impl_zeroize_with_default { ($($type:ty),+) => { $(impl DefaultIsZeroes for $type {})+ }; } impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize); impl_zeroize_with_default!(u16, u32, u64, u128, usize); impl_zeroize_with_default!(f32, f64, char, bool); /// On non-nightly targets, avoid special-casing u8 #[cfg(not(feature = "nightly"))] impl_zeroize_with_default!(u8); /// On nightly targets, don't implement `DefaultIsZeroes` so we can special /// case using batch set operations. #[cfg(feature = "nightly")] impl Zeroize for u8 { fn zeroize(&mut self) { volatile_set(self, 0); atomic_fence(); } } impl<'a, Z> Zeroize for IterMut<'a, Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { let default = Z::default(); for elem in self { volatile_set(elem, default); } atomic_fence(); } } /// Implement zeroize on all types that can be zeroized with the zero value impl<Z> Zeroize for [Z] where Z: DefaultIsZeroes, { fn zeroize(&mut self) { // TODO: batch volatile set operation? 
self.iter_mut().zeroize(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing #[cfg(feature = "nightly")] impl Zeroize for [u8] { fn zeroize(&mut self) { volatile_zero_bytes(self); atomic_fence(); } } #[cfg(feature = "alloc")] impl<Z> Zeroize for Vec<Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { self.resize(self.capacity(), Default::default()); self.as_mut_slice().zeroize(); self.clear(); } } #[cfg(feature = "alloc")] impl Zeroize for String { fn zeroize(&mut self) { unsafe { self.as_bytes_mut() }.zeroize(); debug_assert!(self.as_bytes().iter().all(|b| *b == 0)); self.clear(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing #[cfg(feature = "nightly")] macro_rules! impl_zeroize_for_byte_array { ($($size:expr),+) => { $( impl Zeroize for [u8; $size] { fn zeroize(&mut self) { volatile_zero_bytes(self.as_mut()); atomic_fence(); } } )+ }; } #[cfg(feature = "nightly")] impl_zeroize_for_byte_array!( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 ); /// Use fences to prevent accesses from being reordered before this /// point, which should hopefully help ensure that all accessors /// see zeroes after this point. #[inline] fn
() { atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); } /// Set a mutable reference to a value to the given replacement #[inline] fn volatile_set<T: Copy + Sized>(dst: &mut T, src: T) { unsafe { ptr::write_volatile(dst, src) } } #[cfg(feature = "nightly")] #[inline] fn volatile_zero_bytes(dst: &mut [u8]) { unsafe { core::intrinsics::volatile_set_memory(dst.as_mut_ptr(), 0, dst.len()) } } #[cfg(test)] mod tests { use super::Zeroize; use std::prelude::v1::*; #[test] fn zeroize_byte_arrays() { let mut arr = [42u8; 64]; arr.zeroize(); assert_eq!(arr.as_ref(), [0u8; 64].as_ref()); } #[test] fn zeroize_vec() { let mut vec = vec![42; 3]; vec.zeroize(); assert!(vec.is_empty()); } #[test] fn zeroize_vec_past_len() { let mut vec = Vec::with_capacity(5); for i in 0..4 { vec.push(10 + i); } vec.clear(); // safe if: new_len <= capacity AND elements "were initialised" unsafe { vec.set_len(1); } assert_eq!(10, vec[0], "clear() hasn't erased our push()es"); vec.clear(); vec.zeroize(); unsafe { vec.set_len(4); } for i in 0..4 { assert_eq!(0, vec[i], "it's been zero'd"); } } #[test] fn zeroize_string() { let mut string = String::from("Hello, world!"); string.zeroize(); assert!(string.is_empty()); } #[test] fn zeroize_box() { let mut boxed_arr = Box::new([42u8; 3]); boxed_arr.zeroize(); assert_eq!(boxed_arr.as_ref(), &[0u8; 3]); } #[cfg(feature = "zeroize_derive")] mod derive { use super::*; #[derive(Zeroize)] struct ZeroizableTupleStruct([u8; 3]); #[test] fn derive_tuple_struct_test() { let mut value = ZeroizableTupleStruct([1, 2, 3]); value.zeroize(); assert_eq!(&value.0, &[0, 0, 0]) } #[derive(Zeroize)] struct ZeroizableStruct { string: String, vec: Vec<u8>, bytearray: [u8; 3], number: usize, boolean: bool, } #[test] fn derive_struct_test() { let mut value = ZeroizableStruct { string: "Hello, world!".to_owned(), vec: vec![1, 2, 3], bytearray: [4, 5, 6], number: 42, boolean: true, }; value.zeroize(); assert!(value.string.is_empty()); assert!(value.vec.is_empty()); assert_eq!(&value.bytearray, &[0, 0, 0]); assert_eq!(value.number, 0); assert!(!value.boolean); } } }
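// Editor's illustrative sketch (not from the source): the core technique the
// crate builds on, shown standalone. A volatile write may not be treated as a
// dead store by the optimizer, and the SeqCst fences mirror `atomic_fence`
// above so later accesses are not reordered ahead of the zeroing stores.
#[allow(dead_code)]
fn zero_slice_example(buf: &mut [u8]) {
    for byte in buf.iter_mut() {
        // Volatile semantics: the compiler must assume this store is
        // externally observable and therefore cannot elide it.
        unsafe { ptr::write_volatile(byte, 0) };
    }
    atomic::fence(atomic::Ordering::SeqCst);
    atomic::compiler_fence(atomic::Ordering::SeqCst);
}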
atomic_fence
identifier_name
lib.rs
//! Securely zero memory with a simple trait ([Zeroize]) built on stable Rust //! primitives which guarantee the operation will not be 'optimized away'. //! //! ## Usage //! //! ``` //! use zeroize::Zeroize; //! //! fn main() { //! // Protip: don't embed secrets in your source code. //! // This is just an example. //! let mut secret = b"Air shield password: 1,2,3,4,5".to_vec(); //! // [ ... ] open the air shield here //! //! // Now that we're done using the secret, zero it out. //! secret.zeroize(); //! } //! ``` //! //! The [Zeroize] trait is impl'd on all of Rust's core scalar types including //! integers, floats, `bool`, and `char`. //! //! Additionally, it's implemented on slices and `IterMut`s of the above types. //! //! When the `std` feature is enabled (which it is by default), it's also impl'd //! for `Vec`s of the above types as well as `String`, where it provides //! [Vec::clear()] / [String::clear()]-like behavior (truncating to zero-length) //! but ensures the backing memory is securely zeroed. //! //! The [DefaultIsZeroes] marker trait can be impl'd on types which also //! impl [Default], which implements [Zeroize] by overwriting a value with //! the default value. //! //! ## Custom Derive Support //! //! This crate has custom derive support for the `Zeroize` trait, which //! automatically calls `zeroize()` on all members of a struct or tuple struct: //! //! ``` //! // Ensure you import the crate with `macro_use`: //! // #[macro_use] //! // extern crate zeroize; //! //! use zeroize::Zeroize; //! //! #[derive(Zeroize)] //! struct MyStruct([u8; 64]); //! ``` //! //! Additionally, you can derive `ZeroizeOnDrop`, which will automatically //! derive a `Drop` handler that calls `zeroize()`: //! //! ``` //! use zeroize::{Zeroize, ZeroizeOnDrop}; //! //! // This struct will be zeroized on drop //! #[derive(Zeroize, ZeroizeOnDrop)] //! struct MyStruct([u8; 64]); //! ``` //! //! ## About //! //! [Zeroing memory securely is hard] - compilers optimize for performance, and //! in doing so they love to "optimize away" unnecessary zeroing calls. There are //! many documented "tricks" to attempt to avoid these optimizations and ensure //! that a zeroing routine is performed reliably. //! //! This crate isn't about tricks: it uses [core::ptr::write_volatile] //! and [core::sync::atomic] memory fences to provide easy-to-use, portable //! zeroing behavior which works on all of Rust's core number types and slices //! thereof, implemented in pure Rust with no usage of FFI or assembly. //! //! - **No insecure fallbacks!** //! - **No dependencies!** //! - **No FFI or inline assembly!** //! - `#![no_std]` **i.e. embedded-friendly**! //! - **No functionality besides securely zeroing memory!** //! //! ## What guarantees does this crate provide? //! //! Ideally a secure memory-zeroing function would guarantee the following: //! //! 1. Ensure the zeroing operation can't be "optimized away" by the compiler. //! 2. Ensure all subsequent reads to the memory following the zeroing operation //! will always see zeroes. //! //! This crate guarantees #1 is true: LLVM's volatile semantics ensure it. //! //! The story around #2 is much more complicated. In brief, it should be true that //! LLVM's current implementation does not attempt to perform optimizations which //! would allow a subsequent (non-volatile) read to see the original value prior //! to zeroization. However, this is not a guarantee, but rather an LLVM //! implementation detail. //! //! 
For more background, we can look to the [core::ptr::write_volatile] //! documentation: //! //! > Volatile operations are intended to act on I/O memory, and are guaranteed //! > to not be elided or reordered by the compiler across other volatile //! > operations. //! > //! > Memory accessed with `read_volatile` or `write_volatile` should not be //! > accessed with non-volatile operations. //! //! Uhoh! This crate does not guarantee all reads to the memory it operates on //! are volatile, and the documentation for [core::ptr::write_volatile] //! explicitly warns against mixing volatile and non-volatile operations. //! Perhaps we'd be better off with something like a `VolatileCell` //! type which owns the associated data and ensures all reads and writes are //! volatile so we don't have to worry about the semantics of mixing volatile and //! non-volatile accesses. //! //! While that's a strategy worth pursuing (and something we may investigate //! separately from this crate), it comes with some onerous API requirements: //! it means any data that we might ever desire to zero is owned by a //! `VolatileCell`. However, this does not make it possible for this crate //! to act on references, which severely limits its applicability. In fact //! a `VolatileCell` can only act on values, i.e. to read a value from it, //! we'd need to make a copy of it, and that's literally the opposite of //! what we want. //! //! It's worth asking what the precise semantics of mixing volatile and //! non-volatile reads actually are, and whether a less obtrusive API which //! can act entirely on mutable references is possible, safe, and provides the //! desired behavior. //! //! Unfortunately, that's a tricky question, because //! [Rust does not have a formally defined memory model][memory-model], //! and the behavior of mixing volatile and non-volatile memory accesses is //! therefore not rigorously specified and winds up being an LLVM //! implementation detail. The semantics were discussed extensively in this //! thread, specifically in the context of zeroing secrets from memory: //! //! <https://internals.rust-lang.org/t/volatile-and-sensitive-memory/3188/24> //! //! Some notable details from this thread: //! //! - Rust/LLVM's notion of "volatile" is centered around data *accesses*, not //! the data itself. Specifically it maps to flags in LLVM IR which control //! the behavior of the optimizer, and is therefore a bit different from the //! typical C notion of "volatile". //! - As mentioned earlier, LLVM does not presently contain optimizations which //! would reorder a non-volatile read to occur before a volatile write. //! However, there is nothing precluding such optimizations from being added. //! LLVM presently appears to exhibit the desired behavior for both points //! #1 and #2 above, but there is nothing preventing future versions of Rust //! and/or LLVM from changing that. //! //! To help mitigate concerns about reordering potentially exposing secrets //! after they have been zeroed, this crate leverages the [core::sync::atomic] //! memory fence functions including [compiler_fence] and [fence] (which uses //! the CPU's native fence instructions). These fences are leveraged with the //! strictest ordering guarantees, [Ordering::SeqCst], which ensures no //! accesses are reordered. Without a formally defined memory model we can't //! guarantee these will be effective, but we hope they will cover most cases. //! //! Concretely the threat of leaking "zeroized" secrets (via reordering by //! 
LLVM and/or the CPU via out-of-order or speculative execution) would //! require a non-volatile access to be reordered ahead of all of the following: //! //! 1. an [Ordering::SeqCst] compiler fence //! 2. an [Ordering::SeqCst] runtime fence //! 3. a volatile write //! //! This seems unlikely, but our usage of mixed non-volatile and volatile //! accesses is technically undefined behavior, at least until the guarantees //! about this particular mixture of operations are formally defined in a //! Rust memory model. //! //! Furthermore, given the recent history of microarchitectural attacks //! (Spectre, Meltdown, etc.), there is also potential for "zeroized" secrets //! to be leaked through covert channels (e.g. memory fences have been used //! as a covert channel), so we are wary of making guarantees unless they can //! be made firmly in terms of both a formal Rust memory model and the //! generated code for a particular CPU architecture. //! //! In conclusion, this crate guarantees the zeroize operation will not be //! elided or "optimized away", makes a "best effort" to ensure that //! memory accesses will not be reordered ahead of the "zeroize" operation, //! but **cannot** yet guarantee that such reordering will not occur. //! //! ## Stack/Heap Zeroing Notes //! //! This crate can be used to zero values from either the stack or the heap. //! //! However, be aware that Rust's current memory semantics (e.g. `Copy` types) //! can leave copies of data in memory, and there isn't presently a good solution //! for ensuring all copies of data on the stack are properly cleared. //! //! The [`Pin` RFC][pin] proposes a method for avoiding this. //! //! ## What about: clearing registers, mlock, mprotect, etc.? //! //! This crate is laser-focused on being a simple, unobtrusive crate for zeroing //! memory in as reliable a manner as is possible on stable Rust. //! //! Clearing registers is a difficult problem that can't easily be solved by //! something like a crate, and requires either inline ASM or rustc support. //! See <https://github.com/rust-lang/rust/issues/17046> for background on //! this particular problem. //! //! Other memory protection mechanisms are interesting and useful, but often //! overkill (e.g. defending against RAM scraping or attackers with swap access). //! Inasmuch as there may be merit to these approaches, there are also many //! other crates that already implement more sophisticated memory protections. //! Such protections are explicitly out-of-scope for this crate. //! //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote //! it in the most unobtrusive manner possible. This includes omitting complex //! `unsafe` memory protection systems and just trying to make the best memory //! zeroing crate available. //! //! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html //! [Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear //! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear //! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html //! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html //! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html //! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html //!
[Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst //! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html //! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html //! [memory-model]: https://github.com/nikomatsakis/rust-memory-model //! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md //! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data #![no_std] #![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)] #![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))] #![cfg_attr(feature = "nightly", feature(core_intrinsics))] #![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")] #[cfg(any(feature = "std", test))] #[cfg_attr(test, macro_use)] extern crate std; #[cfg(feature = "zeroize_derive")] #[allow(unused_imports)] #[macro_use] extern crate zeroize_derive; #[cfg(feature = "zeroize_derive")] #[doc(hidden)] pub use zeroize_derive::*; use core::{ptr, slice::IterMut, sync::atomic}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::prelude::*; #[cfg(feature = "std")] use std::prelude::v1::*; /// Trait for securely erasing types from memory pub trait Zeroize { /// Zero out this object from memory (using Rust or OS intrinsics which /// ensure the zeroization operation is not "optimized away") fn zeroize(&mut self); } /// Marker trait for types whose `Default` is the desired zeroization result pub trait DefaultIsZeroes: Copy + Default + Sized {} /// Marker trait intended for use with `zeroize_derive` which indicates that /// a type should have a drop handler which calls Zeroize. /// /// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an /// associated drop handler. pub trait ZeroizeOnDrop: Zeroize + Drop {} impl<Z> Zeroize for Z where Z: DefaultIsZeroes, { fn zeroize(&mut self) { volatile_set(self, Z::default()); atomic_fence(); } } macro_rules! impl_zeroize_with_default { ($($type:ty),+) => { $(impl DefaultIsZeroes for $type {})+ }; } impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize); impl_zeroize_with_default!(u16, u32, u64, u128, usize); impl_zeroize_with_default!(f32, f64, char, bool); /// On non-nightly targets, avoid special-casing u8 #[cfg(not(feature = "nightly"))] impl_zeroize_with_default!(u8); /// On nightly targets, don't implement `DefaultIsZeroes` so we can special /// case using batch set operations. #[cfg(feature = "nightly")] impl Zeroize for u8 { fn zeroize(&mut self) { volatile_set(self, 0); atomic_fence(); } } impl<'a, Z> Zeroize for IterMut<'a, Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { let default = Z::default(); for elem in self { volatile_set(elem, default); } atomic_fence(); } } /// Implement zeroize on all types that can be zeroized with the zero value impl<Z> Zeroize for [Z] where Z: DefaultIsZeroes, { fn zeroize(&mut self) { // TODO: batch volatile set operation? 
self.iter_mut().zeroize(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing #[cfg(feature = "nightly")] impl Zeroize for [u8] { fn zeroize(&mut self) { volatile_zero_bytes(self); atomic_fence(); } } #[cfg(feature = "alloc")] impl<Z> Zeroize for Vec<Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { self.resize(self.capacity(), Default::default()); self.as_mut_slice().zeroize(); self.clear(); } } #[cfg(feature = "alloc")] impl Zeroize for String { fn zeroize(&mut self) { unsafe { self.as_bytes_mut() }.zeroize(); debug_assert!(self.as_bytes().iter().all(|b| *b == 0)); self.clear(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing #[cfg(feature = "nightly")] macro_rules! impl_zeroize_for_byte_array { ($($size:expr),+) => { $( impl Zeroize for [u8; $size] { fn zeroize(&mut self) { volatile_zero_bytes(self.as_mut()); atomic_fence(); } } )+ }; } #[cfg(feature = "nightly")] impl_zeroize_for_byte_array!( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 ); /// Use fences to prevent accesses from being reordered before this /// point, which should hopefully help ensure that all accessors /// see zeroes after this point. #[inline] fn atomic_fence()
/// Set a mutable reference to a value to the given replacement #[inline] fn volatile_set<T: Copy + Sized>(dst: &mut T, src: T) { unsafe { ptr::write_volatile(dst, src) } } #[cfg(feature = "nightly")] #[inline] fn volatile_zero_bytes(dst: &mut [u8]) { unsafe { core::intrinsics::volatile_set_memory(dst.as_mut_ptr(), 0, dst.len()) } } #[cfg(test)] mod tests { use super::Zeroize; use std::prelude::v1::*; #[test] fn zeroize_byte_arrays() { let mut arr = [42u8; 64]; arr.zeroize(); assert_eq!(arr.as_ref(), [0u8; 64].as_ref()); } #[test] fn zeroize_vec() { let mut vec = vec![42; 3]; vec.zeroize(); assert!(vec.is_empty()); } #[test] fn zeroize_vec_past_len() { let mut vec = Vec::with_capacity(5); for i in 0..4 { vec.push(10 + i); } vec.clear(); // safe if: new_len <= capacity AND elements "were initialised" unsafe { vec.set_len(1); } assert_eq!(10, vec[0], "clear() hasn't erased our push()es"); vec.clear(); vec.zeroize(); unsafe { vec.set_len(4); } for i in 0..4 { assert_eq!(0, vec[i], "it's been zero'd"); } } #[test] fn zeroize_string() { let mut string = String::from("Hello, world!"); string.zeroize(); assert!(string.is_empty()); } #[test] fn zeroize_box() { let mut boxed_arr = Box::new([42u8; 3]); boxed_arr.zeroize(); assert_eq!(boxed_arr.as_ref(), &[0u8; 3]); } #[cfg(feature = "zeroize_derive")] mod derive { use super::*; #[derive(Zeroize)] struct ZeroizableTupleStruct([u8; 3]); #[test] fn derive_tuple_struct_test() { let mut value = ZeroizableTupleStruct([1, 2, 3]); value.zeroize(); assert_eq!(&value.0, &[0, 0, 0]) } #[derive(Zeroize)] struct ZeroizableStruct { string: String, vec: Vec<u8>, bytearray: [u8; 3], number: usize, boolean: bool, } #[test] fn derive_struct_test() { let mut value = ZeroizableStruct { string: "Hello, world!".to_owned(), vec: vec![1, 2, 3], bytearray: [4, 5, 6], number: 42, boolean: true, }; value.zeroize(); assert!(value.string.is_empty()); assert!(value.vec.is_empty()); assert_eq!(&value.bytearray, &[0, 0, 0]); assert_eq!(value.number, 0); assert!(!value.boolean); } } }
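// Editor's illustrative sketch (not from the source): a hand-written
// equivalent of what `#[derive(Zeroize, ZeroizeOnDrop)]` is described as
// producing in the docs above. `Passphrase` is a hypothetical example type;
// this assumes the `std`/`alloc` feature so that `String: Zeroize`.
#[allow(dead_code)]
struct Passphrase(String);

impl Zeroize for Passphrase {
    fn zeroize(&mut self) {
        self.0.zeroize();
    }
}

impl Drop for Passphrase {
    fn drop(&mut self) {
        // Securely clear the secret before the memory is released.
        self.zeroize();
    }
}

impl ZeroizeOnDrop for Passphrase {}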
{ atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); }
identifier_body
fakes.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![cfg(test)] use { crate::{ client::{bss_selection::SignalData, scan, types as client_types}, config_management::{ Credential, NetworkConfig, NetworkConfigError, NetworkIdentifier, PastConnectionData, PastConnectionList, SavedNetworksManagerApi, ScanResultType, }, }, async_trait::async_trait, fidl_fuchsia_wlan_sme as fidl_sme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{channel::mpsc, lock::Mutex}, log::{info, warn}, rand::Rng, std::{ collections::{HashMap, VecDeque}, convert::TryInto, sync::Arc, }, wlan_common::hasher::WlanHasher, }; pub struct FakeSavedNetworksManager { saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>, connections_recorded: Mutex<Vec<ConnectionRecord>>, connect_results_recorded: Mutex<Vec<ConnectResultRecord>>, lookup_compatible_response: Mutex<LookupCompatibleResponse>, pub fail_all_stores: bool, pub active_scan_result_recorded: Arc<Mutex<bool>>, pub passive_scan_result_recorded: Arc<Mutex<bool>>, pub past_connections_response: PastConnectionList, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectionRecord { pub id: NetworkIdentifier, pub credential: Credential, pub data: PastConnectionData, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectResultRecord { pub id: NetworkIdentifier, pub credential: Credential, pub bssid: client_types::Bssid, pub connect_result: fidl_sme::ConnectResult, pub scan_type: client_types::ScanObservation, } /// Use a struct so that the option can be updated from None to Some to allow the response to be /// set after FakeSavedNetworksManager is created. Use an optional response value rather than /// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic /// for easier debugging. struct LookupCompatibleResponse { inner: Option<Vec<NetworkConfig>>, } impl LookupCompatibleResponse { fn new() -> Self { LookupCompatibleResponse { inner: None } } } impl FakeSavedNetworksManager { pub fn new() -> Self { Self { saved_networks: Mutex::new(HashMap::new()), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Create FakeSavedNetworksManager, saving network configs with the specified /// network identifiers and credentials at init. pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self { let saved_networks = network_configs .into_iter() .filter_map(|(id, cred)| { NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config])) }) .collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>(); Self { saved_networks: Mutex::new(saved_networks), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Returns the past connections as they were recorded, rather than how they would have been /// stored. 
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord>
pub fn get_recorded_connect_results(&self) -> Vec<ConnectResultRecord> { self.connect_results_recorded .try_lock() .expect("expect locking self.connect_results_recorded to succeed") .clone() } /// Manually change the hidden network probability of a saved network. pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) { let mut saved_networks = self.saved_networks.lock().await; let networks = match saved_networks.get_mut(&id) { Some(networks) => networks, None => { info!("Failed to find network to update"); return; } }; for network in networks.iter_mut() { network.hidden_probability = hidden_prob; } } pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) { self.lookup_compatible_response.try_lock().expect("failed to get lock").inner = Some(response); } } #[async_trait] impl SavedNetworksManagerApi for FakeSavedNetworksManager { async fn remove( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<bool, NetworkConfigError> { let mut saved_networks = self.saved_networks.lock().await; if let Some(network_configs) = saved_networks.get_mut(&network_id) { let original_len = network_configs.len(); network_configs.retain(|cfg| cfg.credential != credential); if original_len != network_configs.len() { return Ok(true); } } Ok(false) } async fn known_network_count(&self) -> usize { unimplemented!() } async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> { self.saved_networks.lock().await.get(id).cloned().unwrap_or_default() } async fn lookup_compatible( &self, ssid: &client_types::Ssid, _scan_security: client_types::SecurityTypeDetailed, ) -> Vec<NetworkConfig> { let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone(); match predetermined_response { Some(resp) => resp, None => { warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID"); self.saved_networks .lock() .await .iter() .filter_map( |(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None }, ) .flatten() .collect() } } } /// Note that the configs-per-NetworkIdentifier limit is set to 1 in /// this mock struct. If a NetworkIdentifier is already stored, writing /// a config to it will evict the previously stored one.
async fn store( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<Option<NetworkConfig>, NetworkConfigError> { if self.fail_all_stores { return Err(NetworkConfigError::StashWriteError); } let config = NetworkConfig::new(network_id.clone(), credential, false)?; return Ok(self .saved_networks .lock() .await .insert(network_id, vec![config]) .and_then(|mut v| v.pop())); } async fn record_connect_result( &self, id: NetworkIdentifier, credential: &Credential, bssid: client_types::Bssid, connect_result: fidl_sme::ConnectResult, scan_type: client_types::ScanObservation, ) { self.connect_results_recorded.try_lock().expect("failed to record connect result").push( ConnectResultRecord { id: id.clone(), credential: credential.clone(), bssid, connect_result, scan_type, }, ); } async fn record_disconnect( &self, id: &NetworkIdentifier, credential: &Credential, data: PastConnectionData, ) { let mut connections_recorded = self.connections_recorded.lock().await; connections_recorded.push(ConnectionRecord { id: id.clone(), credential: credential.clone(), data, }); } async fn record_periodic_metrics(&self) {} async fn record_scan_result( &self, scan_type: ScanResultType, _results: Vec<client_types::NetworkIdentifierDetailed>, ) { match scan_type { ScanResultType::Undirected => { let mut v = self.passive_scan_result_recorded.lock().await; *v = true; } ScanResultType::Directed(_) => { let mut v = self.active_scan_result_recorded.lock().await; *v = true } } } async fn get_networks(&self) -> Vec<NetworkConfig> { self.saved_networks .lock() .await .values() .into_iter() .flat_map(|cfgs| cfgs.clone()) .collect() } async fn get_past_connections( &self, _id: &NetworkIdentifier, _credential: &Credential, _bssid: &client_types::Bssid, ) -> PastConnectionList { self.past_connections_response.clone() } } pub fn create_wlan_hasher() -> WlanHasher { WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes()) } pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) { const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value mpsc::channel(DEFAULT_BUFFER_SIZE) } /// Create past connection data with all random values. Tests can set the values they care about. 
pub fn random_connection_data() -> PastConnectionData { let mut rng = rand::thread_rng(); let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into()); let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10)); let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000)); let disconnect_time = connect_time + time_to_connect + uptime; PastConnectionData::new( client_types::Bssid( (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(), ), connect_time, time_to_connect, disconnect_time, uptime, client_types::DisconnectReason::DisconnectDetectedFromSme, SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10), rng.gen::<u8>().into(), ) } #[derive(Clone)] pub struct FakeScanRequester { // A type alias for this complex type would be needless indirection, so allow the complex type #[allow(clippy::type_complexity)] pub scan_results: Arc<Mutex<VecDeque<Result<Vec<client_types::ScanResult>, client_types::ScanError>>>>, #[allow(clippy::type_complexity)] pub scan_requests: Arc<Mutex<Vec<(scan::ScanReason, Vec<client_types::Ssid>, Vec<client_types::WlanChan>)>>>, } impl FakeScanRequester { pub fn new() -> Self { FakeScanRequester { scan_results: Arc::new(Mutex::new(VecDeque::new())), scan_requests: Arc::new(Mutex::new(vec![])), } } pub async fn add_scan_result( &self, res: Result<Vec<client_types::ScanResult>, client_types::ScanError>, ) { self.scan_results.lock().await.push_back(res); } } #[async_trait] impl scan::ScanRequestApi for FakeScanRequester { async fn perform_scan( &self, scan_reason: scan::ScanReason, ssids: Vec<client_types::Ssid>, channels: Vec<client_types::WlanChan>, ) -> Result<Vec<client_types::ScanResult>, client_types::ScanError> { self.scan_requests.lock().await.push((scan_reason, ssids, channels)); self.scan_results.lock().await.pop_front().unwrap() } }
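// Editor's illustrative sketch (hypothetical test, not from the source):
// typical use of `FakeScanRequester` -- queue a canned result, drive the code
// under test, then assert on the recorded request. The `ScanReason` variant
// name used here is an assumption for illustration.
#[fasync::run_singlethreaded(test)]
async fn fake_scan_requester_example() {
    use scan::ScanRequestApi;

    let requester = FakeScanRequester::new();
    requester.add_scan_result(Ok(vec![])).await;

    // `perform_scan` records the request and pops queued results in FIFO
    // order; it panics if nothing was queued, surfacing setup mistakes early.
    let results = requester
        .perform_scan(scan::ScanReason::NetworkSelection, vec![], vec![])
        .await
        .expect("expected the queued Ok result");
    assert!(results.is_empty());
    assert_eq!(requester.scan_requests.lock().await.len(), 1);
}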
{ self.connections_recorded .try_lock() .expect("expect locking self.connections_recorded to succeed") .clone() }
identifier_body
fakes.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![cfg(test)] use { crate::{ client::{bss_selection::SignalData, scan, types as client_types}, config_management::{ Credential, NetworkConfig, NetworkConfigError, NetworkIdentifier, PastConnectionData, PastConnectionList, SavedNetworksManagerApi, ScanResultType, }, }, async_trait::async_trait, fidl_fuchsia_wlan_sme as fidl_sme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{channel::mpsc, lock::Mutex}, log::{info, warn}, rand::Rng, std::{ collections::{HashMap, VecDeque}, convert::TryInto, sync::Arc, }, wlan_common::hasher::WlanHasher, }; pub struct FakeSavedNetworksManager { saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>, connections_recorded: Mutex<Vec<ConnectionRecord>>, connect_results_recorded: Mutex<Vec<ConnectResultRecord>>, lookup_compatible_response: Mutex<LookupCompatibleResponse>, pub fail_all_stores: bool, pub active_scan_result_recorded: Arc<Mutex<bool>>, pub passive_scan_result_recorded: Arc<Mutex<bool>>, pub past_connections_response: PastConnectionList, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectionRecord { pub id: NetworkIdentifier, pub credential: Credential, pub data: PastConnectionData, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectResultRecord { pub id: NetworkIdentifier, pub credential: Credential, pub bssid: client_types::Bssid, pub connect_result: fidl_sme::ConnectResult, pub scan_type: client_types::ScanObservation, } /// Use a struct so that the option can be updated from None to Some to allow the response to be /// set after FakeSavedNetworksManager is created. Use an optional response value rather than /// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic /// for easier debugging. struct LookupCompatibleResponse { inner: Option<Vec<NetworkConfig>>, } impl LookupCompatibleResponse { fn new() -> Self { LookupCompatibleResponse { inner: None } } } impl FakeSavedNetworksManager { pub fn new() -> Self { Self { saved_networks: Mutex::new(HashMap::new()), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Create FakeSavedNetworksManager, saving network configs with the specified /// network identifiers and credentials at init. pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self { let saved_networks = network_configs .into_iter() .filter_map(|(id, cred)| { NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config])) }) .collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>(); Self { saved_networks: Mutex::new(saved_networks), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Returns the past connections as they were recorded, rather than how they would have been /// stored. 
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> { self.connections_recorded .try_lock() .expect("expect locking self.connections_recorded to succeed") .clone() } pub fn get_recorded_connect_results(&self) -> Vec<ConnectResultRecord> { self.connect_results_recorded .try_lock() .expect("expect locking self.connect_results_recorded to succeed") .clone() } /// Manually change the hidden network probability of a saved network. pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) { let mut saved_networks = self.saved_networks.lock().await; let networks = match saved_networks.get_mut(&id) { Some(networks) => networks, None => { info!("Failed to find network to update"); return; } }; for network in networks.iter_mut() { network.hidden_probability = hidden_prob; } } pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) { self.lookup_compatible_response.try_lock().expect("failed to get lock").inner = Some(response); } } #[async_trait] impl SavedNetworksManagerApi for FakeSavedNetworksManager { async fn remove( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<bool, NetworkConfigError> { let mut saved_networks = self.saved_networks.lock().await; if let Some(network_configs) = saved_networks.get_mut(&network_id) { let original_len = network_configs.len(); network_configs.retain(|cfg| cfg.credential != credential); if original_len != network_configs.len() { return Ok(true); } } Ok(false) } async fn known_network_count(&self) -> usize { unimplemented!() } async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> { self.saved_networks.lock().await.get(id).cloned().unwrap_or_default() } async fn lookup_compatible( &self, ssid: &client_types::Ssid, _scan_security: client_types::SecurityTypeDetailed, ) -> Vec<NetworkConfig> { let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone(); match predetermined_response { Some(resp) => resp, None => { warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID"); self.saved_networks .lock() .await .iter() .filter_map( |(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None }, ) .flatten() .collect() } } } /// Note that the configs-per-NetworkIdentifier limit is set to 1 in /// this mock struct. If a NetworkIdentifier is already stored, writing /// a config to it will evict the previously stored one.
async fn store( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<Option<NetworkConfig>, NetworkConfigError> { if self.fail_all_stores { return Err(NetworkConfigError::StashWriteError); } let config = NetworkConfig::new(network_id.clone(), credential, false)?; return Ok(self .saved_networks .lock() .await .insert(network_id, vec![config]) .and_then(|mut v| v.pop())); } async fn record_connect_result( &self, id: NetworkIdentifier, credential: &Credential, bssid: client_types::Bssid, connect_result: fidl_sme::ConnectResult, scan_type: client_types::ScanObservation, ) { self.connect_results_recorded.try_lock().expect("failed to record connect result").push( ConnectResultRecord { id: id.clone(), credential: credential.clone(), bssid, connect_result, scan_type, }, ); } async fn record_disconnect( &self, id: &NetworkIdentifier, credential: &Credential, data: PastConnectionData, ) { let mut connections_recorded = self.connections_recorded.lock().await; connections_recorded.push(ConnectionRecord { id: id.clone(), credential: credential.clone(), data, }); } async fn record_periodic_metrics(&self) {} async fn record_scan_result( &self, scan_type: ScanResultType, _results: Vec<client_types::NetworkIdentifierDetailed>, ) { match scan_type { ScanResultType::Undirected => { let mut v = self.passive_scan_result_recorded.lock().await; *v = true; } ScanResultType::Directed(_) => { let mut v = self.active_scan_result_recorded.lock().await; *v = true } } } async fn get_networks(&self) -> Vec<NetworkConfig> { self.saved_networks .lock() .await .values() .into_iter() .flat_map(|cfgs| cfgs.clone()) .collect() } async fn get_past_connections( &self, _id: &NetworkIdentifier, _credential: &Credential, _bssid: &client_types::Bssid, ) -> PastConnectionList { self.past_connections_response.clone() } } pub fn create_wlan_hasher() -> WlanHasher { WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes()) } pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) { const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value mpsc::channel(DEFAULT_BUFFER_SIZE) } /// Create past connection data with all random values. Tests can set the values they care about. pub fn random_connection_data() -> PastConnectionData { let mut rng = rand::thread_rng(); let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into()); let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10)); let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000)); let disconnect_time = connect_time + time_to_connect + uptime; PastConnectionData::new( client_types::Bssid( (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(), ), connect_time, time_to_connect, disconnect_time, uptime, client_types::DisconnectReason::DisconnectDetectedFromSme, SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10),
#[derive(Clone)] pub struct FakeScanRequester { // A type alias for this complex type would be needless indirection, so allow the complex type #[allow(clippy::type_complexity)] pub scan_results: Arc<Mutex<VecDeque<Result<Vec<client_types::ScanResult>, client_types::ScanError>>>>, #[allow(clippy::type_complexity)] pub scan_requests: Arc<Mutex<Vec<(scan::ScanReason, Vec<client_types::Ssid>, Vec<client_types::WlanChan>)>>>, } impl FakeScanRequester { pub fn new() -> Self { FakeScanRequester { scan_results: Arc::new(Mutex::new(VecDeque::new())), scan_requests: Arc::new(Mutex::new(vec![])), } } pub async fn add_scan_result( &self, res: Result<Vec<client_types::ScanResult>, client_types::ScanError>, ) { self.scan_results.lock().await.push_back(res); } } #[async_trait] impl scan::ScanRequestApi for FakeScanRequester { async fn perform_scan( &self, scan_reason: scan::ScanReason, ssids: Vec<client_types::Ssid>, channels: Vec<client_types::WlanChan>, ) -> Result<Vec<client_types::ScanResult>, client_types::ScanError> { self.scan_requests.lock().await.push((scan_reason, ssids, channels)); self.scan_results.lock().await.pop_front().unwrap() } }
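// Editor's illustrative sketch (hypothetical test, not from the source):
// seeding `FakeSavedNetworksManager` at construction and exercising the
// `SavedNetworksManagerApi` surface. How `NetworkIdentifier` and `Credential`
// are built here is an assumption; adjust to the real constructors in
// config_management.
#[fasync::run_singlethreaded(test)]
async fn fake_saved_networks_manager_example() {
    // Assumed constructor signature for illustration only.
    let id = NetworkIdentifier::try_from("example-ssid", client_types::SecurityType::Wpa2)
        .expect("failed to build network identifier");
    let credential = Credential::Password(b"password".to_vec());
    let manager =
        FakeSavedNetworksManager::new_with_saved_networks(vec![(id.clone(), credential.clone())]);

    // The seeded config is visible through `lookup`...
    assert_eq!(manager.lookup(&id).await.len(), 1);
    // ...and `remove` reports whether a matching config was evicted.
    assert!(manager.remove(id, credential).await.expect("remove failed"));
}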
rng.gen::<u8>().into(), ) }
random_line_split
fakes.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![cfg(test)] use { crate::{ client::{bss_selection::SignalData, scan, types as client_types}, config_management::{ Credential, NetworkConfig, NetworkConfigError, NetworkIdentifier, PastConnectionData, PastConnectionList, SavedNetworksManagerApi, ScanResultType, }, }, async_trait::async_trait, fidl_fuchsia_wlan_sme as fidl_sme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{channel::mpsc, lock::Mutex}, log::{info, warn}, rand::Rng, std::{ collections::{HashMap, VecDeque}, convert::TryInto, sync::Arc, }, wlan_common::hasher::WlanHasher, }; pub struct FakeSavedNetworksManager { saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>, connections_recorded: Mutex<Vec<ConnectionRecord>>, connect_results_recorded: Mutex<Vec<ConnectResultRecord>>, lookup_compatible_response: Mutex<LookupCompatibleResponse>, pub fail_all_stores: bool, pub active_scan_result_recorded: Arc<Mutex<bool>>, pub passive_scan_result_recorded: Arc<Mutex<bool>>, pub past_connections_response: PastConnectionList, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectionRecord { pub id: NetworkIdentifier, pub credential: Credential, pub data: PastConnectionData, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectResultRecord { pub id: NetworkIdentifier, pub credential: Credential, pub bssid: client_types::Bssid, pub connect_result: fidl_sme::ConnectResult, pub scan_type: client_types::ScanObservation, } /// Use a struct so that the option can be updated from None to Some to allow the response to be /// set after FakeSavedNetworksManager is created. Use an optional response value rather than /// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic /// for easier debugging. struct LookupCompatibleResponse { inner: Option<Vec<NetworkConfig>>, } impl LookupCompatibleResponse { fn new() -> Self { LookupCompatibleResponse { inner: None } } } impl FakeSavedNetworksManager { pub fn new() -> Self { Self { saved_networks: Mutex::new(HashMap::new()), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Create FakeSavedNetworksManager, saving network configs with the specified /// network identifiers and credentials at init. pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self { let saved_networks = network_configs .into_iter() .filter_map(|(id, cred)| { NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config])) }) .collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>(); Self { saved_networks: Mutex::new(saved_networks), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Returns the past connections as they were recorded, rather than how they would have been /// stored. 
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> { self.connections_recorded .try_lock() .expect("expect locking self.connections_recorded to succeed") .clone() } pub fn get_recorded_connect_results(&self) -> Vec<ConnectResultRecord> { self.connect_results_recorded .try_lock() .expect("expect locking self.connect_results_recorded to succeed") .clone() } /// Manually change the hidden network probability of a saved network. pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) { let mut saved_networks = self.saved_networks.lock().await; let networks = match saved_networks.get_mut(&id) { Some(networks) => networks, None => { info!("Failed to find network to update"); return; } }; for network in networks.iter_mut() { network.hidden_probability = hidden_prob; } } pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) { self.lookup_compatible_response.try_lock().expect("failed to get lock").inner = Some(response); } } #[async_trait] impl SavedNetworksManagerApi for FakeSavedNetworksManager { async fn remove( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<bool, NetworkConfigError> { let mut saved_networks = self.saved_networks.lock().await; if let Some(network_configs) = saved_networks.get_mut(&network_id) { let original_len = network_configs.len(); network_configs.retain(|cfg| cfg.credential != credential); if original_len != network_configs.len() { return Ok(true); } } Ok(false) } async fn known_network_count(&self) -> usize { unimplemented!() } async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> { self.saved_networks.lock().await.get(id).cloned().unwrap_or_default() } async fn lookup_compatible( &self, ssid: &client_types::Ssid, _scan_security: client_types::SecurityTypeDetailed, ) -> Vec<NetworkConfig> { let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone(); match predetermined_response { Some(resp) => resp, None => { warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID"); self.saved_networks .lock() .await .iter() .filter_map( |(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None }, ) .flatten() .collect() } } } /// Note that the configs-per-NetworkIdentifier limit is set to 1 in /// this mock struct. If a NetworkIdentifier is already stored, writing /// a config to it will evict the previously stored one. 
async fn store( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<Option<NetworkConfig>, NetworkConfigError> { if self.fail_all_stores { return Err(NetworkConfigError::StashWriteError); } let config = NetworkConfig::new(network_id.clone(), credential, false)?; return Ok(self .saved_networks .lock() .await .insert(network_id, vec![config]) .and_then(|mut v| v.pop())); } async fn record_connect_result( &self, id: NetworkIdentifier, credential: &Credential, bssid: client_types::Bssid, connect_result: fidl_sme::ConnectResult, scan_type: client_types::ScanObservation, ) { self.connect_results_recorded.try_lock().expect("failed to record connect result").push( ConnectResultRecord { id: id.clone(), credential: credential.clone(), bssid, connect_result, scan_type, }, ); } async fn record_disconnect( &self, id: &NetworkIdentifier, credential: &Credential, data: PastConnectionData, ) { let mut connections_recorded = self.connections_recorded.lock().await; connections_recorded.push(ConnectionRecord { id: id.clone(), credential: credential.clone(), data, }); } async fn record_periodic_metrics(&self) {} async fn record_scan_result( &self, scan_type: ScanResultType, _results: Vec<client_types::NetworkIdentifierDetailed>, ) { match scan_type { ScanResultType::Undirected => { let mut v = self.passive_scan_result_recorded.lock().await; *v = true; } ScanResultType::Directed(_) => { let mut v = self.active_scan_result_recorded.lock().await; *v = true } } } async fn get_networks(&self) -> Vec<NetworkConfig> { self.saved_networks .lock() .await .values() .into_iter() .flat_map(|cfgs| cfgs.clone()) .collect() } async fn get_past_connections( &self, _id: &NetworkIdentifier, _credential: &Credential, _bssid: &client_types::Bssid, ) -> PastConnectionList { self.past_connections_response.clone() } } pub fn create_wlan_hasher() -> WlanHasher { WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes()) } pub fn
() -> (mpsc::Sender<String>, mpsc::Receiver<String>) { const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value mpsc::channel(DEFAULT_BUFFER_SIZE) } /// Create past connection data with all random values. Tests can set the values they care about. pub fn random_connection_data() -> PastConnectionData { let mut rng = rand::thread_rng(); let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into()); let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10)); let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000)); let disconnect_time = connect_time + time_to_connect + uptime; PastConnectionData::new( client_types::Bssid( (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(), ), connect_time, time_to_connect, disconnect_time, uptime, client_types::DisconnectReason::DisconnectDetectedFromSme, SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10), rng.gen::<u8>().into(), ) } #[derive(Clone)] pub struct FakeScanRequester { // A type alias for this complex type would be needless indirection, so allow the complex type #[allow(clippy::type_complexity)] pub scan_results: Arc<Mutex<VecDeque<Result<Vec<client_types::ScanResult>, client_types::ScanError>>>>, #[allow(clippy::type_complexity)] pub scan_requests: Arc<Mutex<Vec<(scan::ScanReason, Vec<client_types::Ssid>, Vec<client_types::WlanChan>)>>>, } impl FakeScanRequester { pub fn new() -> Self { FakeScanRequester { scan_results: Arc::new(Mutex::new(VecDeque::new())), scan_requests: Arc::new(Mutex::new(vec![])), } } pub async fn add_scan_result( &self, res: Result<Vec<client_types::ScanResult>, client_types::ScanError>, ) { self.scan_results.lock().await.push_back(res); } } #[async_trait] impl scan::ScanRequestApi for FakeScanRequester { async fn perform_scan( &self, scan_reason: scan::ScanReason, ssids: Vec<client_types::Ssid>, channels: Vec<client_types::WlanChan>, ) -> Result<Vec<client_types::ScanResult>, client_types::ScanError> { self.scan_requests.lock().await.push((scan_reason, ssids, channels)); self.scan_results.lock().await.pop_front().unwrap() } }
create_inspect_persistence_channel
identifier_name
fakes.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![cfg(test)] use { crate::{ client::{bss_selection::SignalData, scan, types as client_types}, config_management::{ Credential, NetworkConfig, NetworkConfigError, NetworkIdentifier, PastConnectionData, PastConnectionList, SavedNetworksManagerApi, ScanResultType, }, }, async_trait::async_trait, fidl_fuchsia_wlan_sme as fidl_sme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{channel::mpsc, lock::Mutex}, log::{info, warn}, rand::Rng, std::{ collections::{HashMap, VecDeque}, convert::TryInto, sync::Arc, }, wlan_common::hasher::WlanHasher, }; pub struct FakeSavedNetworksManager { saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>, connections_recorded: Mutex<Vec<ConnectionRecord>>, connect_results_recorded: Mutex<Vec<ConnectResultRecord>>, lookup_compatible_response: Mutex<LookupCompatibleResponse>, pub fail_all_stores: bool, pub active_scan_result_recorded: Arc<Mutex<bool>>, pub passive_scan_result_recorded: Arc<Mutex<bool>>, pub past_connections_response: PastConnectionList, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectionRecord { pub id: NetworkIdentifier, pub credential: Credential, pub data: PastConnectionData, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectResultRecord { pub id: NetworkIdentifier, pub credential: Credential, pub bssid: client_types::Bssid, pub connect_result: fidl_sme::ConnectResult, pub scan_type: client_types::ScanObservation, } /// Use a struct so that the option can be updated from None to Some to allow the response to be /// set after FakeSavedNetworksManager is created. Use an optional response value rather than /// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic /// for easier debugging. struct LookupCompatibleResponse { inner: Option<Vec<NetworkConfig>>, } impl LookupCompatibleResponse { fn new() -> Self { LookupCompatibleResponse { inner: None } } } impl FakeSavedNetworksManager { pub fn new() -> Self { Self { saved_networks: Mutex::new(HashMap::new()), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Create FakeSavedNetworksManager, saving network configs with the specified /// network identifiers and credentials at init. pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self { let saved_networks = network_configs .into_iter() .filter_map(|(id, cred)| { NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config])) }) .collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>(); Self { saved_networks: Mutex::new(saved_networks), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Returns the past connections as they were recorded, rather than how they would have been /// stored. 
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> { self.connections_recorded .try_lock() .expect("expect locking self.connections_recorded to succeed") .clone() } pub fn get_recorded_connect_results(&self) -> Vec<ConnectResultRecord> { self.connect_results_recorded .try_lock() .expect("expect locking self.connect_results_recorded to succeed") .clone() } /// Manually change the hidden network probability of a saved network. pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) { let mut saved_networks = self.saved_networks.lock().await; let networks = match saved_networks.get_mut(&id) { Some(networks) => networks, None => { info!("Failed to find network to update"); return; } }; for network in networks.iter_mut() { network.hidden_probability = hidden_prob; } } pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) { self.lookup_compatible_response.try_lock().expect("failed to get lock").inner = Some(response); } } #[async_trait] impl SavedNetworksManagerApi for FakeSavedNetworksManager { async fn remove( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<bool, NetworkConfigError> { let mut saved_networks = self.saved_networks.lock().await; if let Some(network_configs) = saved_networks.get_mut(&network_id) { let original_len = network_configs.len(); network_configs.retain(|cfg| cfg.credential != credential); if original_len != network_configs.len() { return Ok(true); } } Ok(false) } async fn known_network_count(&self) -> usize { unimplemented!() } async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> { self.saved_networks.lock().await.get(id).cloned().unwrap_or_default() } async fn lookup_compatible( &self, ssid: &client_types::Ssid, _scan_security: client_types::SecurityTypeDetailed, ) -> Vec<NetworkConfig> { let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone(); match predetermined_response { Some(resp) => resp, None => { warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID"); self.saved_networks .lock() .await .iter() .filter_map( |(id, config)| if id.ssid == *ssid
else { None }, ) .flatten() .collect() } } } /// Note that the configs-per-NetworkIdentifier limit is set to 1 in /// this mock struct. If a NetworkIdentifier is already stored, writing /// a config to it will evict the previously stored one. async fn store( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<Option<NetworkConfig>, NetworkConfigError> { if self.fail_all_stores { return Err(NetworkConfigError::StashWriteError); } let config = NetworkConfig::new(network_id.clone(), credential, false)?; return Ok(self .saved_networks .lock() .await .insert(network_id, vec![config]) .and_then(|mut v| v.pop())); } async fn record_connect_result( &self, id: NetworkIdentifier, credential: &Credential, bssid: client_types::Bssid, connect_result: fidl_sme::ConnectResult, scan_type: client_types::ScanObservation, ) { self.connect_results_recorded.try_lock().expect("failed to record connect result").push( ConnectResultRecord { id: id.clone(), credential: credential.clone(), bssid, connect_result, scan_type, }, ); } async fn record_disconnect( &self, id: &NetworkIdentifier, credential: &Credential, data: PastConnectionData, ) { let mut connections_recorded = self.connections_recorded.lock().await; connections_recorded.push(ConnectionRecord { id: id.clone(), credential: credential.clone(), data, }); } async fn record_periodic_metrics(&self) {} async fn record_scan_result( &self, scan_type: ScanResultType, _results: Vec<client_types::NetworkIdentifierDetailed>, ) { match scan_type { ScanResultType::Undirected => { let mut v = self.passive_scan_result_recorded.lock().await; *v = true; } ScanResultType::Directed(_) => { let mut v = self.active_scan_result_recorded.lock().await; *v = true } } } async fn get_networks(&self) -> Vec<NetworkConfig> { self.saved_networks .lock() .await .values() .into_iter() .flat_map(|cfgs| cfgs.clone()) .collect() } async fn get_past_connections( &self, _id: &NetworkIdentifier, _credential: &Credential, _bssid: &client_types::Bssid, ) -> PastConnectionList { self.past_connections_response.clone() } } pub fn create_wlan_hasher() -> WlanHasher { WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes()) } pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) { const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value mpsc::channel(DEFAULT_BUFFER_SIZE) } /// Create past connection data with all random values. Tests can set the values they care about. 
pub fn random_connection_data() -> PastConnectionData { let mut rng = rand::thread_rng(); let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into()); let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10)); let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000)); let disconnect_time = connect_time + time_to_connect + uptime; PastConnectionData::new( client_types::Bssid( (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(), ), connect_time, time_to_connect, disconnect_time, uptime, client_types::DisconnectReason::DisconnectDetectedFromSme, SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10), rng.gen::<u8>().into(), ) } #[derive(Clone)] pub struct FakeScanRequester { // A type alias for this complex type would be needless indirection, so allow the complex type #[allow(clippy::type_complexity)] pub scan_results: Arc<Mutex<VecDeque<Result<Vec<client_types::ScanResult>, client_types::ScanError>>>>, #[allow(clippy::type_complexity)] pub scan_requests: Arc<Mutex<Vec<(scan::ScanReason, Vec<client_types::Ssid>, Vec<client_types::WlanChan>)>>>, } impl FakeScanRequester { pub fn new() -> Self { FakeScanRequester { scan_results: Arc::new(Mutex::new(VecDeque::new())), scan_requests: Arc::new(Mutex::new(vec![])), } } pub async fn add_scan_result( &self, res: Result<Vec<client_types::ScanResult>, client_types::ScanError>, ) { self.scan_results.lock().await.push_back(res); } } #[async_trait] impl scan::ScanRequestApi for FakeScanRequester { async fn perform_scan( &self, scan_reason: scan::ScanReason, ssids: Vec<client_types::Ssid>, channels: Vec<client_types::WlanChan>, ) -> Result<Vec<client_types::ScanResult>, client_types::ScanError> { self.scan_requests.lock().await.push((scan_reason, ssids, channels)); self.scan_results.lock().await.pop_front().unwrap() } }
{ Some(config.clone()) }
conditional_block
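The Rust fakes above share one record-and-replay test-double pattern: each fake logs every call it receives (scan_requests) and replays a queue of canned responses (scan_results), with perform_scan panicking via unwrap() when a test forgot to queue a result. For readers following the Python records below, a minimal sketch of the same pattern — names here are illustrative, not part of the Fuchsia codebase:

from collections import deque

class FakeScanRequester:
    """Record-and-replay test double: logs every request, replays queued results."""
    def __init__(self):
        self.scan_results = deque()  # canned results, consumed FIFO
        self.scan_requests = []      # every request made, kept for later assertions

    def add_scan_result(self, result):
        self.scan_results.append(result)

    def perform_scan(self, reason, ssids, channels):
        self.scan_requests.append((reason, ssids, channels))
        # Raises IndexError if no result was queued, mirroring the unwrap()
        # in the Rust fake: a loud failure beats a silent default.
        return self.scan_results.popleft()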
yahoo_plus_save.py
''' Yahoo Plus Saver Copyright 2021 LossFuture Public Version 1.0 9/4/2021 ''' import requests import json import re import time import sys import sqlite3 from bs4 import BeautifulSoup import urllib.request import urllib.parse from itertools import cycle #https://github.com/lossfuture/yahoo-answer-backup #add your preferred proxies here proxies=[ "127.0.0.1:1000", "127.0.1.2:4000" ] proxy_pool = cycle(proxies) class yahoo: def
(self,logfile_name,dbpath): dbpath='''file:{}?mode=rw'''.format(dbpath) self.conn = sqlite3.connect(dbpath,uri=True) self.c = self.conn.cursor() self.logfile_name=logfile_name self.requests_cnt=0 self.https_proxy=proxies[0] @staticmethod def convert_tf(d): if d is True: return 1 else: return 0 @staticmethod def _curtime(): return time.strftime("%d/%m/%Y %H:%M:%S") def printtext(self,string,wt=False): #wt =withtime '''Print text to screen and log file''' if wt is True: str1="{0} : {1}".format(self._curtime(),string) try: print(str1) except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) #print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a")) print(str1,file= open(self.logfile_name, "a",encoding="utf8")) else: try: print(string) except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) print(string,file= open(self.logfile_name, "a",encoding="utf8")) def printtolog(self,string,wt=False): #wt =withtime '''Print text to log file only''' if wt is True: print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8")) else: print(string,file= open(self.logfile_name, "a",encoding="utf8")) def fetchdata_nomapping(self, sql,arg=None): '''Get data from database, return a list without column name''' if arg is None: self.c.execute(sql) else: self.c.execute(sql,arg) b=self.c.fetchall() return b def parse_file(self,file): a_file = open(file, "r",encoding="utf8") list_of_lists = [] for line in a_file: stripped_line = line.strip() if len(stripped_line)==0: continue if stripped_line[0] =="#": #skip lines starting with a sharp symbol continue line_list = stripped_line.split(",") list_of_lists.append(line_list) a_file.close() return list_of_lists def remove_preferences_page(self,soup): self.printtext("I hate this!!!",wt=True) calcal=soup.find(id='pref') calcal=calcal.find(class_='left') calcal=calcal.find("a").get("href") #calcal= return calcal def new_request(self,url): headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"} print("Current proxy",self.https_proxy,"used for",self.requests_cnt,"times") if self.requests_cnt>22000: self.https_proxy=next(proxy_pool) self.printtext("Using proxy {}".format(self.https_proxy),wt=True) self.requests_cnt=0 while True: try: proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy} response = requests.get(url,allow_redirects=True,proxies=proxyDict,headers=headers) self.requests_cnt+=1 break except TimeoutError: self.printtext("TimeoutError: Connection Timeout",wt=True) time.sleep(10) except requests.exceptions.ProxyError as err: #OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused self.printtext(("Proxy Error:", err),wt=True) self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True) time.sleep(10) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) self.requests_cnt=0 except requests.exceptions.SSLError as err: self.printtext(("SSL Error:", err),wt=True) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) self.requests_cnt=0 if response.status_code == 404: self.printtext("Error 404 for: {}".format(url),wt=True) return None if response.status_code != 200: self.printtext("HTTP Error {} 
".format(response.status_code),wt=True) print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8")) return None #response.raise_for_status() for resp in response.history: print(resp.status_code, resp.url) soup = BeautifulSoup(response.text, 'html.parser') return soup def insert_category_data(self,data): data=data["itemListElement"] flevel=data[0]["item"] cat_url00=re.split('\/|\?|\=|\&',flevel) cat_id00=cat_url00[6] for i in data: cat_url=re.split('\/|\?|\=|\&',i["item"]) cat_id=cat_url[6] #print(cat_id) level=i["position"] if level ==1: self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"])) else: self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"])) self.conn.commit() return data[-1]["item"] def insert_data(self,oldqid,newqid,cat_id,data,user_url): try: data=data["mainEntity"] except KeyError: return title=data["name"] content=data["text"] ansc=data["answerCount"] date=data["dateCreated"] author_t=data["author"]["@type"] author_n=data["author"]["name"] if not oldqid: self.printtext("Insert {}, question:{}".format(newqid,title),wt=True) else: self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True) self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0])) user_urlpos=1 if "acceptedAnswer" in data: data2=data["acceptedAnswer"] content2=data2["text"] date2=data2["dateCreated"] author_t2=data2["author"]["@type"] author_n2=data2["author"]["name"] upvote_c2=data2["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1: pass else: self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; if data["suggestedAnswer"]: for i in data["suggestedAnswer"]: content2=i["text"] date2=i["dateCreated"] author_t2=i["author"]["@type"] author_n2=i["author"]["name"] upvote_c2=i["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1: user_urlpos+=1; continue self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; self.conn.commit() self.printtext("Answer Count: {}".format(ansc),wt=True) #self.printtext("Insert {} completed".format(newqid),wt=True) return def parsing_area(self,link): #https://hk.answers.yahoo.com/question/index?qid=20210405072002AAPcNek self.printtext(link,wt=True) spilited_url=re.split('\/|\?|\=|\&',link) questionid=spilited_url[6] rows2=self.fetchdata_nomapping("SELECT 1 FROM question WHERE newqid=? LIMIT 1",(questionid,)) rows3=self.fetchdata_nomapping("SELECT 1 FROM question WHERE oldqid=? 
 LIMIT 1",(questionid,)) if len(rows2)>0 or len(rows3)>0: self.printtext("Already fetched, skip request",wt=True) return soup=self.new_request(link) if soup is None: return #print response into a html #print(soup.prettify(),file= open(questionid+".html", "w",encoding="utf8")) script = soup.find_all('script', type=["application/ld+json"]) new_qid=soup.find("meta", property="og:url") #print(new_qid["content"] if new_qid else "No meta url given") spilited_url=re.split('\/|\?|\=|\&',new_qid["content"]) new_questionid=spilited_url[6] if questionid==new_questionid: questionid=None user_url_soup=soup.find_all("div", class_="UserProfile__avatar___2gI-3") user_url=[] for k in user_url_soup: try: p=k.find("a").get("href") spilited_url=re.split('\/|\?|\=|\&',p) user_url.append(spilited_url[4]) #print(k.find("a").get("href")) except AttributeError: user_url.append(None) json1=json.loads(script[0].contents[0]) json2=json.loads(script[1].contents[0]) #print(json1) #print(json2) cat_url=self.insert_category_data(json1) cat_url=re.split('\/|\?|\=|\&',cat_url) cat_id=cat_url[6] self.insert_data(questionid,new_questionid,cat_id,json2,user_url) time.sleep(3) return def mainloop(self,txt_file): #self.printtext("Start",wt=True) self.printtolog("\n{:=^60}".format("Python Start")) list_of_lists=self.parse_file(txt_file) for i in list_of_lists: self.parsing_area(i[0]) self.printtext("Insert Complete",wt=True) return def search(self,line): keyword=line[0] try: start_pos=int(line[1])*10-9 batch=int(line[1])-1 except (IndexError, ValueError): start_pos=0 batch=0 self.printtolog("Keyword :{}".format(keyword)) url="https://hk.knowledge.search.yahoo.com/search?ei=UTF-8&vm=r&rd=r1&fr=FP-tab-web-t&p={}&b={}".format(urllib.parse.quote_plus(keyword),start_pos) #print(url) while True: if batch>=102: #unknown error: cannot browse page 102 break batch+=1 self.printtext("Batch {}:{}".format(batch,url),wt=True) soup=self.new_request(url) results=soup.find(id='web') if results is None: url=self.remove_preferences_page(soup) batch-=1 continue results=results.find('ol') results=results.find_all("li") for i in results: link=i.find("a").get("href") self.parsing_area(link) next_page=soup.find("ol",class_='searchBottom') try: url=next_page.find("a",class_='next').get("href") except AttributeError:#no next page break #self.printtext("Searching completed") self.printtolog("\n{:=^60}".format("Searching {} completed".format(keyword))) return def search_loop(self,keywordstxt): self.printtolog("\n{:=^60}".format("Python Start")) keywords=self.parse_file(keywordstxt) #print(keywords) for i in keywords: self.search(i) time.sleep(20) def start(): #db file here yahoo1=yahoo("yahoo_plus.log","<your_db_file_here>") #don't remove this line #yahoo1.mainloop("yahoo_url_list1.txt") yahoo1.search_loop("keywords.txt") if __name__ == '__main__': start()
__init__
identifier_name
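new_request in the scraper above rotates proxies with itertools.cycle: it moves to the next proxy after roughly 22,000 requests, or immediately on a ProxyError/SSLError. A stripped-down sketch of that rotation loop, with the script's placeholder proxy addresses and its logging omitted:

import time
from itertools import cycle

import requests

PROXIES = ["127.0.0.1:1000", "127.0.1.2:4000"]  # placeholders, as in the script
proxy_pool = cycle(PROXIES)
_proxy = next(proxy_pool)
_uses = 0  # counts requests across calls, like self.requests_cnt

def fetch_with_rotation(url, max_uses=22000):
    global _proxy, _uses
    while True:
        if _uses >= max_uses:  # retire a proxy after heavy use
            _proxy, _uses = next(proxy_pool), 0
        try:
            resp = requests.get(url, proxies={"http": _proxy, "https": _proxy})
            _uses += 1
            return resp
        except requests.exceptions.ProxyError:
            time.sleep(10)  # back off, then rotate to the next proxy
            _proxy, _uses = next(proxy_pool), 0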
yahoo_plus_save.py
''' Yahoo Plus Saver Copyright 2021 LossFuture Public Version 1.0 9/4/2021 ''' import requests import json import re import time import sys import sqlite3 from bs4 import BeautifulSoup import urllib.request import urllib.parse from itertools import cycle #https://github.com/lossfuture/yahoo-answer-backup #add your preferred proxies here proxies=[ "127.0.0.1:1000", "127.0.1.2:4000" ] proxy_pool = cycle(proxies) class yahoo: def __init__(self,logfile_name,dbpath): dbpath='''file:{}?mode=rw'''.format(dbpath) self.conn = sqlite3.connect(dbpath,uri=True) self.c = self.conn.cursor() self.logfile_name=logfile_name self.requests_cnt=0 self.https_proxy=proxies[0] @staticmethod def convert_tf(d): if d is True: return 1 else: return 0 @staticmethod def _curtime(): return time.strftime("%d/%m/%Y %H:%M:%S") def printtext(self,string,wt=False): #wt =withtime '''Print text to screen and log file''' if wt is True: str1="{0} : {1}".format(self._curtime(),string) try: print(str1) except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) #print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a")) print(str1,file= open(self.logfile_name, "a",encoding="utf8")) else: try: print(string) except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) print(string,file= open(self.logfile_name, "a",encoding="utf8")) def printtolog(self,string,wt=False): #wt =withtime '''Print text to log file only''' if wt is True: print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8")) else: print(string,file= open(self.logfile_name, "a",encoding="utf8")) def fetchdata_nomapping(self, sql,arg=None):
def parse_file(self,file): a_file = open(file, "r",encoding="utf8") list_of_lists = [] for line in a_file: stripped_line = line.strip() if len(stripped_line)==0: continue if stripped_line[0] =="#": #skip lines starting with a sharp symbol continue line_list = stripped_line.split(",") list_of_lists.append(line_list) a_file.close() return list_of_lists def remove_preferences_page(self,soup): self.printtext("I hate this!!!",wt=True) calcal=soup.find(id='pref') calcal=calcal.find(class_='left') calcal=calcal.find("a").get("href") #calcal= return calcal def new_request(self,url): headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"} print("Current proxy",self.https_proxy,"used for",self.requests_cnt,"times") if self.requests_cnt>22000: self.https_proxy=next(proxy_pool) self.printtext("Using proxy {}".format(self.https_proxy),wt=True) self.requests_cnt=0 while True: try: proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy} response = requests.get(url,allow_redirects=True,proxies=proxyDict,headers=headers) self.requests_cnt+=1 break except TimeoutError: self.printtext("TimeoutError: Connection Timeout",wt=True) time.sleep(10) except requests.exceptions.ProxyError as err: #OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused self.printtext(("Proxy Error:", err),wt=True) self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True) time.sleep(10) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) self.requests_cnt=0 except requests.exceptions.SSLError as err: self.printtext(("SSL Error:", err),wt=True) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) self.requests_cnt=0 if response.status_code == 404: self.printtext("Error 404 for: {}".format(url),wt=True) return None if response.status_code != 200: self.printtext("HTTP Error {} ".format(response.status_code),wt=True) print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8")) return None #response.raise_for_status() for resp in response.history: print(resp.status_code, resp.url) soup = BeautifulSoup(response.text, 'html.parser') return soup def insert_category_data(self,data): data=data["itemListElement"] flevel=data[0]["item"] cat_url00=re.split('\/|\?|\=|\&',flevel) cat_id00=cat_url00[6] for i in data: cat_url=re.split('\/|\?|\=|\&',i["item"]) cat_id=cat_url[6] #print(cat_id) level=i["position"] if level ==1: self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"])) else: self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"])) self.conn.commit() return data[-1]["item"] def insert_data(self,oldqid,newqid,cat_id,data,user_url): try: data=data["mainEntity"] except KeyError: return title=data["name"] content=data["text"] ansc=data["answerCount"] date=data["dateCreated"] author_t=data["author"]["@type"] author_n=data["author"]["name"] if not oldqid: self.printtext("Insert {}, question:{}".format(newqid,title),wt=True) else: self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True) self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0])) user_urlpos=1 if 
"acceptedAnswer" in data: data2=data["acceptedAnswer"] content2=data2["text"] date2=data2["dateCreated"] author_t2=data2["author"]["@type"] author_n2=data2["author"]["name"] upvote_c2=data2["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1: pass else: self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; if data["suggestedAnswer"]: for i in data["suggestedAnswer"]: content2=i["text"] date2=i["dateCreated"] author_t2=i["author"]["@type"] author_n2=i["author"]["name"] upvote_c2=i["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1: user_urlpos+=1; continue self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; self.conn.commit() self.printtext("Answer Count: {}".format(ansc),wt=True) #self.printtext("Insert {} completed".format(newqid),wt=True) return def parsing_area(self,link): #https://hk.answers.yahoo.com/question/index?qid=20210405072002AAPcNek self.printtext(link,wt=True) spilited_url=re.split('\/|\?|\=|\&',link) questionid=spilited_url[6] rows2=self.fetchdata_nomapping("SELECT 1 FROM question WHERE newqid=? LIMIT 1",(questionid,)) rows3=self.fetchdata_nomapping("SELECT 1 FROM question WHERE oldqid=? 
 LIMIT 1",(questionid,)) if len(rows2)>0 or len(rows3)>0: self.printtext("Already fetched, skip request",wt=True) return soup=self.new_request(link) if soup is None: return #print response into a html #print(soup.prettify(),file= open(questionid+".html", "w",encoding="utf8")) script = soup.find_all('script', type=["application/ld+json"]) new_qid=soup.find("meta", property="og:url") #print(new_qid["content"] if new_qid else "No meta url given") spilited_url=re.split('\/|\?|\=|\&',new_qid["content"]) new_questionid=spilited_url[6] if questionid==new_questionid: questionid=None user_url_soup=soup.find_all("div", class_="UserProfile__avatar___2gI-3") user_url=[] for k in user_url_soup: try: p=k.find("a").get("href") spilited_url=re.split('\/|\?|\=|\&',p) user_url.append(spilited_url[4]) #print(k.find("a").get("href")) except AttributeError: user_url.append(None) json1=json.loads(script[0].contents[0]) json2=json.loads(script[1].contents[0]) #print(json1) #print(json2) cat_url=self.insert_category_data(json1) cat_url=re.split('\/|\?|\=|\&',cat_url) cat_id=cat_url[6] self.insert_data(questionid,new_questionid,cat_id,json2,user_url) time.sleep(3) return def mainloop(self,txt_file): #self.printtext("Start",wt=True) self.printtolog("\n{:=^60}".format("Python Start")) list_of_lists=self.parse_file(txt_file) for i in list_of_lists: self.parsing_area(i[0]) self.printtext("Insert Complete",wt=True) return def search(self,line): keyword=line[0] try: start_pos=int(line[1])*10-9 batch=int(line[1])-1 except (IndexError, ValueError): start_pos=0 batch=0 self.printtolog("Keyword :{}".format(keyword)) url="https://hk.knowledge.search.yahoo.com/search?ei=UTF-8&vm=r&rd=r1&fr=FP-tab-web-t&p={}&b={}".format(urllib.parse.quote_plus(keyword),start_pos) #print(url) while True: if batch>=102: #unknown error: cannot browse page 102 break batch+=1 self.printtext("Batch {}:{}".format(batch,url),wt=True) soup=self.new_request(url) results=soup.find(id='web') if results is None: url=self.remove_preferences_page(soup) batch-=1 continue results=results.find('ol') results=results.find_all("li") for i in results: link=i.find("a").get("href") self.parsing_area(link) next_page=soup.find("ol",class_='searchBottom') try: url=next_page.find("a",class_='next').get("href") except AttributeError:#no next page break #self.printtext("Searching completed") self.printtolog("\n{:=^60}".format("Searching {} completed".format(keyword))) return def search_loop(self,keywordstxt): self.printtolog("\n{:=^60}".format("Python Start")) keywords=self.parse_file(keywordstxt) #print(keywords) for i in keywords: self.search(i) time.sleep(20) def start(): #db file here yahoo1=yahoo("yahoo_plus.log","<your_db_file_here>") #don't remove this line #yahoo1.mainloop("yahoo_url_list1.txt") yahoo1.search_loop("keywords.txt") if __name__ == '__main__': start()
'''Get data from database, return a list without column name''' if arg is None: self.c.execute(sql) else: self.c.execute(sql,arg) b=self.c.fetchall() return b
identifier_body
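__init__ above opens SQLite with the URI form file:{path}?mode=rw and uri=True, which makes connect() fail fast with OperationalError when the database file does not already exist, rather than silently creating an empty one (sqlite3's default behavior). A minimal demonstration with a placeholder path:

import sqlite3

db_path = "yahoo_plus.db"  # placeholder

try:
    # mode=rw: open read-write, but never create the file if it is missing
    conn = sqlite3.connect("file:{}?mode=rw".format(db_path), uri=True)
except sqlite3.OperationalError:
    print("database file not found; create the schema before scraping")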
yahoo_plus_save.py
''' Yahoo Plus Saver Copyright 2021 LossFuture Public Version 1.0 9/4/2021 ''' import requests import json import re import time import sys import sqlite3 from bs4 import BeautifulSoup import urllib.request import urllib.parse from itertools import cycle #https://github.com/lossfuture/yahoo-answer-backup #add your preferred proxies here proxies=[ "127.0.0.1:1000", "127.0.1.2:4000" ] proxy_pool = cycle(proxies) class yahoo: def __init__(self,logfile_name,dbpath): dbpath='''file:{}?mode=rw'''.format(dbpath) self.conn = sqlite3.connect(dbpath,uri=True) self.c = self.conn.cursor() self.logfile_name=logfile_name self.requests_cnt=0 self.https_proxy=proxies[0] @staticmethod def convert_tf(d): if d is True: return 1 else: return 0 @staticmethod def _curtime(): return time.strftime("%d/%m/%Y %H:%M:%S") def printtext(self,string,wt=False): #wt =withtime '''Print text to screen and log file''' if wt is True: str1="{0} : {1}".format(self._curtime(),string) try: print(str1) except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) #print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a")) print(str1,file= open(self.logfile_name, "a",encoding="utf8")) else: try: print(string) except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) print(string,file= open(self.logfile_name, "a",encoding="utf8")) def printtolog(self,string,wt=False): #wt =withtime '''Print text to log file only''' if wt is True: print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8")) else: print(string,file= open(self.logfile_name, "a",encoding="utf8")) def fetchdata_nomapping(self, sql,arg=None): '''Get data from database, return a list without column name''' if arg is None: self.c.execute(sql) else: self.c.execute(sql,arg) b=self.c.fetchall() return b def parse_file(self,file): a_file = open(file, "r",encoding="utf8") list_of_lists = [] for line in a_file: stripped_line = line.strip() if len(stripped_line)==0: continue if stripped_line[0] =="#": #skip lines starting with a sharp symbol continue line_list = stripped_line.split(",") list_of_lists.append(line_list) a_file.close() return list_of_lists def remove_preferences_page(self,soup): self.printtext("I hate this!!!",wt=True) calcal=soup.find(id='pref') calcal=calcal.find(class_='left') calcal=calcal.find("a").get("href") #calcal= return calcal def new_request(self,url): headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"} print("Current proxy",self.https_proxy,"used for",self.requests_cnt,"times") if self.requests_cnt>22000: self.https_proxy=next(proxy_pool) self.printtext("Using proxy {}".format(self.https_proxy),wt=True) self.requests_cnt=0 while True: try: proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy} response = requests.get(url,allow_redirects=True,proxies=proxyDict,headers=headers) self.requests_cnt+=1 break except TimeoutError: self.printtext("TimeoutError: Connection Timeout",wt=True) time.sleep(10) except requests.exceptions.ProxyError as err: #OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused self.printtext(("Proxy Error:", err),wt=True) self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True) time.sleep(10) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) 
self.requests_cnt=0 except requests.exceptions.SSLError as err: self.printtext(("SSL Error:", err),wt=True) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) self.requests_cnt=0 if response.status_code == 404: self.printtext("Error 404 for: {}".format(url),wt=True) return None if response.status_code != 200: self.printtext("HTTP Error {} ".format(response.status_code),wt=True) print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8")) return None #response.raise_for_status() for resp in response.history: print(resp.status_code, resp.url) soup = BeautifulSoup(response.text, 'html.parser') return soup def insert_category_data(self,data): data=data["itemListElement"] flevel=data[0]["item"] cat_url00=re.split('\/|\?|\=|\&',flevel) cat_id00=cat_url00[6] for i in data: cat_url=re.split('\/|\?|\=|\&',i["item"]) cat_id=cat_url[6] #print(cat_id) level=i["position"] if level ==1: self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"])) else: self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"])) self.conn.commit() return data[-1]["item"] def insert_data(self,oldqid,newqid,cat_id,data,user_url): try: data=data["mainEntity"] except KeyError: return title=data["name"] content=data["text"] ansc=data["answerCount"] date=data["dateCreated"] author_t=data["author"]["@type"] author_n=data["author"]["name"] if not oldqid: self.printtext("Insert {}, question:{}".format(newqid,title),wt=True) else: self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True) self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0])) user_urlpos=1 if "acceptedAnswer" in data: data2=data["acceptedAnswer"] content2=data2["text"] date2=data2["dateCreated"] author_t2=data2["author"]["@type"] author_n2=data2["author"]["name"] upvote_c2=data2["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1:
else: self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; if data["suggestedAnswer"]: for i in data["suggestedAnswer"]: content2=i["text"] date2=i["dateCreated"] author_t2=i["author"]["@type"] author_n2=i["author"]["name"] upvote_c2=i["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1: user_urlpos+=1; continue self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; self.conn.commit() self.printtext("Answer Count: {}".format(ansc),wt=True) #self.printtext("Insert {} completed".format(newqid),wt=True) return def parsing_area(self,link): #https://hk.answers.yahoo.com/question/index?qid=20210405072002AAPcNek self.printtext(link,wt=True) spilited_url=re.split('\/|\?|\=|\&',link) questionid=spilited_url[6] rows2=self.fetchdata_nomapping("SELECT 1 FROM question WHERE newqid=? LIMIT 1",(questionid,)) rows3=self.fetchdata_nomapping("SELECT 1 FROM question WHERE oldqid=? LIMIT 1",(questionid,)) if len(rows2)>0 or len(rows3)>0: self.printtext("Already fetched, skip request",wt=True) return soup=self.new_request(link) if soup is None: return #print response into a html #print(soup.prettify(),file= open(questionid+".html", "w",encoding="utf8")) script = soup.find_all('script', type=["application/ld+json"]) new_qid=soup.find("meta", property="og:url") #print(new_qid["content"] if new_qid else "No meta url given") spilited_url=re.split('\/|\?|\=|\&',new_qid["content"]) new_questionid=spilited_url[6] if questionid==new_questionid: questionid=None user_url_soup=soup.find_all("div", class_="UserProfile__avatar___2gI-3") user_url=[] for k in user_url_soup: try: p=k.find("a").get("href") spilited_url=re.split('\/|\?|\=|\&',p) user_url.append(spilited_url[4]) #print(k.find("a").get("href")) except AttributeError: user_url.append(None) json1=json.loads(script[0].contents[0]) json2=json.loads(script[1].contents[0]) #print(json1) #print(json2) cat_url=self.insert_category_data(json1) cat_url=re.split('\/|\?|\=|\&',cat_url) cat_id=cat_url[6] self.insert_data(questionid,new_questionid,cat_id,json2,user_url) time.sleep(3) return def mainloop(self,txt_file): #self.printtext("Start",wt=True) self.printtolog("\n{:=^60}".format("Python Start")) list_of_lists=self.parse_file(txt_file) for i in list_of_lists: self.parsing_area(i[0]) self.printtext("Insert Complete",wt=True) return def search(self,line): keyword=line[0] try: start_pos=int(line[1])*10-9 batch=int(line[1])-1 except (IndexError, ValueError): start_pos=0 batch=0 self.printtolog("Keyword :{}".format(keyword)) url="https://hk.knowledge.search.yahoo.com/search?ei=UTF-8&vm=r&rd=r1&fr=FP-tab-web-t&p={}&b={}".format(urllib.parse.quote_plus(keyword),start_pos) #print(url) while True: if batch>=102: #unknown error: cannot browse page 102 break batch+=1 
self.printtext("Batch {}:{}".format(batch,url),wt=True) soup=self.new_request(url) results=soup.find(id='web') if results is None: url=self.remove_preferences_page(soup) batch-=1 continue results=results.find('ol') results=results.find_all("li") for i in results: link=i.find("a").get("href") self.parsing_area(link) next_page=soup.find("ol",class_='searchBottom') try: url=next_page.find("a",class_='next').get("href") except AttributeError:#no next page break #self.printtext("Searching completed") self.printtolog("\n{:=^60}".format("Searching {} completed".format(keyword))) return def search_loop(self,keywordstxt): self.printtolog("\n{:=^60}".format("Python Start")) keywords=self.parse_file(keywordstxt) #print(keywords) for i in keywords: self.search(i) time.sleep(20) def start(): #db file here yahoo1=yahoo("<your_db_file_here>") #dont remove this line #yahoo1.mainloop("yahoo_url_list1.txt") yahoo1.search_loop("keywords.txt") if __name__ == '__main__': start()
pass
conditional_block
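insert_data deduplicates answers by probing with SELECT 1 ... LIMIT 1 before inserting. The same check-then-insert idiom in isolation (the answers table and its columns follow the script's own queries; the schema itself is not shown in this file):

def answer_exists(cursor, answer, author_name, datecreated):
    # Probe for an identical row; LIMIT 1 stops scanning at the first match.
    cursor.execute(
        "SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",
        (answer, author_name, datecreated),
    )
    return cursor.fetchone() is not None

Note this probe is not atomic under concurrent writers; the script runs single-threaded, and its INSERT OR IGNORE provides a second line of defense.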
yahoo_plus_save.py
''' Yahoo Plus Saver Copyright 2021 LossFuture Public Version 1.0 9/4/2021 ''' import requests import json import re import time import sys import sqlite3 from bs4 import BeautifulSoup import urllib.request import urllib.parse from itertools import cycle #https://github.com/lossfuture/yahoo-answer-backup #add your preferred proxies here proxies=[ "127.0.0.1:1000", "127.0.1.2:4000" ] proxy_pool = cycle(proxies) class yahoo: def __init__(self,logfile_name,dbpath): dbpath='''file:{}?mode=rw'''.format(dbpath) self.conn = sqlite3.connect(dbpath,uri=True) self.c = self.conn.cursor() self.logfile_name=logfile_name self.requests_cnt=0 self.https_proxy=proxies[0] @staticmethod def convert_tf(d): if d is True: return 1 else: return 0 @staticmethod def _curtime(): return time.strftime("%d/%m/%Y %H:%M:%S") def printtext(self,string,wt=False): #wt =withtime '''Print text to screen and log file''' if wt is True: str1="{0} : {1}".format(self._curtime(),string) try:
except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) #print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a")) print(str1,file= open(self.logfile_name, "a",encoding="utf8")) else: try: print(string) except UnicodeEncodeError: print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime())) print(string,file= open(self.logfile_name, "a",encoding="utf8")) def printtolog(self,string,wt=False): #wt =withtime '''Print text to log file only''' if wt is True: print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8")) else: print(string,file= open(self.logfile_name, "a",encoding="utf8")) def fetchdata_nomapping(self, sql,arg=None): '''Get data from database, return a list without column name''' if arg is None: self.c.execute(sql) else: self.c.execute(sql,arg) b=self.c.fetchall() return b def parse_file(self,file): a_file = open(file, "r",encoding="utf8") list_of_lists = [] for line in a_file: stripped_line = line.strip() if len(stripped_line)==0: continue if stripped_line[0] =="#": #skip lines starting with a sharp symbol continue line_list = stripped_line.split(",") list_of_lists.append(line_list) a_file.close() return list_of_lists def remove_preferences_page(self,soup): self.printtext("I hate this!!!",wt=True) calcal=soup.find(id='pref') calcal=calcal.find(class_='left') calcal=calcal.find("a").get("href") #calcal= return calcal def new_request(self,url): headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"} print("Current proxy",self.https_proxy,"used for",self.requests_cnt,"times") if self.requests_cnt>22000: self.https_proxy=next(proxy_pool) self.printtext("Using proxy {}".format(self.https_proxy),wt=True) self.requests_cnt=0 while True: try: proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy} response = requests.get(url,allow_redirects=True,proxies=proxyDict,headers=headers) self.requests_cnt+=1 break except TimeoutError: self.printtext("TimeoutError: Connection Timeout",wt=True) time.sleep(10) except requests.exceptions.ProxyError as err: #OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused self.printtext(("Proxy Error:", err),wt=True) self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True) time.sleep(10) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) self.requests_cnt=0 except requests.exceptions.SSLError as err: self.printtext(("SSL Error:", err),wt=True) self.https_proxy=next(proxy_pool) self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True) self.requests_cnt=0 if response.status_code == 404: self.printtext("Error 404 for: {}".format(url),wt=True) return None if response.status_code != 200: self.printtext("HTTP Error {} ".format(response.status_code),wt=True) print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8")) return None #response.raise_for_status() for resp in response.history: print(resp.status_code, resp.url) soup = BeautifulSoup(response.text, 'html.parser') return soup def insert_category_data(self,data): data=data["itemListElement"] flevel=data[0]["item"] cat_url00=re.split('\/|\?|\=|\&',flevel) cat_id00=cat_url00[6] for i in data: cat_url=re.split('\/|\?|\=|\&',i["item"]) cat_id=cat_url[6] #print(cat_id) level=i["position"] if level ==1: 
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"])) else: self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"])) self.conn.commit() return data[-1]["item"] def insert_data(self,oldqid,newqid,cat_id,data,user_url): try: data=data["mainEntity"] except KeyError: return title=data["name"] content=data["text"] ansc=data["answerCount"] date=data["dateCreated"] author_t=data["author"]["@type"] author_n=data["author"]["name"] if not oldqid: self.printtext("Insert {}, question:{}".format(newqid,title),wt=True) else: self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True) self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0])) user_urlpos=1 if "acceptedAnswer" in data: data2=data["acceptedAnswer"] content2=data2["text"] date2=data2["dateCreated"] author_t2=data2["author"]["@type"] author_n2=data2["author"]["name"] upvote_c2=data2["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1: pass else: self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; if data["suggestedAnswer"]: for i in data["suggestedAnswer"]: content2=i["text"] date2=i["dateCreated"] author_t2=i["author"]["@type"] author_n2=i["author"]["name"] upvote_c2=i["upvoteCount"] rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2)) if len(rows2)==1: user_urlpos+=1; continue self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2)) user_urlpos+=1; self.conn.commit() self.printtext("Answer Count: {}".format(ansc),wt=True) #self.printtext("Insert {} completed".format(newqid),wt=True) return def parsing_area(self,link): #https://hk.answers.yahoo.com/question/index?qid=20210405072002AAPcNek self.printtext(link,wt=True) spilited_url=re.split('\/|\?|\=|\&',link) questionid=spilited_url[6] rows2=self.fetchdata_nomapping("SELECT 1 FROM question WHERE newqid=? LIMIT 1",(questionid,)) rows3=self.fetchdata_nomapping("SELECT 1 FROM question WHERE oldqid=? 
 LIMIT 1",(questionid,)) if len(rows2)>0 or len(rows3)>0: self.printtext("Already fetched, skip request",wt=True) return soup=self.new_request(link) if soup is None: return #print response into a html #print(soup.prettify(),file= open(questionid+".html", "w",encoding="utf8")) script = soup.find_all('script', type=["application/ld+json"]) new_qid=soup.find("meta", property="og:url") #print(new_qid["content"] if new_qid else "No meta url given") spilited_url=re.split('\/|\?|\=|\&',new_qid["content"]) new_questionid=spilited_url[6] if questionid==new_questionid: questionid=None user_url_soup=soup.find_all("div", class_="UserProfile__avatar___2gI-3") user_url=[] for k in user_url_soup: try: p=k.find("a").get("href") spilited_url=re.split('\/|\?|\=|\&',p) user_url.append(spilited_url[4]) #print(k.find("a").get("href")) except AttributeError: user_url.append(None) json1=json.loads(script[0].contents[0]) json2=json.loads(script[1].contents[0]) #print(json1) #print(json2) cat_url=self.insert_category_data(json1) cat_url=re.split('\/|\?|\=|\&',cat_url) cat_id=cat_url[6] self.insert_data(questionid,new_questionid,cat_id,json2,user_url) time.sleep(3) return def mainloop(self,txt_file): #self.printtext("Start",wt=True) self.printtolog("\n{:=^60}".format("Python Start")) list_of_lists=self.parse_file(txt_file) for i in list_of_lists: self.parsing_area(i[0]) self.printtext("Insert Complete",wt=True) return def search(self,line): keyword=line[0] try: start_pos=int(line[1])*10-9 batch=int(line[1])-1 except (IndexError, ValueError): start_pos=0 batch=0 self.printtolog("Keyword :{}".format(keyword)) url="https://hk.knowledge.search.yahoo.com/search?ei=UTF-8&vm=r&rd=r1&fr=FP-tab-web-t&p={}&b={}".format(urllib.parse.quote_plus(keyword),start_pos) #print(url) while True: if batch>=102: #unknown error: cannot browse page 102 break batch+=1 self.printtext("Batch {}:{}".format(batch,url),wt=True) soup=self.new_request(url) results=soup.find(id='web') if results is None: url=self.remove_preferences_page(soup) batch-=1 continue results=results.find('ol') results=results.find_all("li") for i in results: link=i.find("a").get("href") self.parsing_area(link) next_page=soup.find("ol",class_='searchBottom') try: url=next_page.find("a",class_='next').get("href") except AttributeError:#no next page break #self.printtext("Searching completed") self.printtolog("\n{:=^60}".format("Searching {} completed".format(keyword))) return def search_loop(self,keywordstxt): self.printtolog("\n{:=^60}".format("Python Start")) keywords=self.parse_file(keywordstxt) #print(keywords) for i in keywords: self.search(i) time.sleep(20) def start(): #db file here yahoo1=yahoo("yahoo_plus.log","<your_db_file_here>") #don't remove this line #yahoo1.mainloop("yahoo_url_list1.txt") yahoo1.search_loop("keywords.txt") if __name__ == '__main__': start()
anichart.py
## Builtin
import copy
import csv
import datetime
import functools
import json
import re
## Custom Module
import AL_Web as web
from AL_Web import requests as alrequests
from aldb2 import SeasonCharts

CHARTNAME = "AniChart"
APIURL = r"http://anichart.net/api/browse/anime?season={season}&year={year}&sort=-score&full_page=true&airing_data=true&page=1"
## For conversion of youtube_id API value
YOUTUBELINK = "www.youtube.com/watch?v={youtube_id}"
## Ordered for desired output
APIDATAHEADERS = ['title_japanese', 'title_romaji', 'title_english', 'first episode', 'airing',
                  'hashtag', 'image', 'airing_status', 'anilist_link', 'average_score',
                  'description', 'duration', 'end_date', 'external_links', 'genres', 'id',
                  'mal_link', 'popularity', 'rankings', 'season', 'source', 'start_date',
                  'studio', 'synonyms', 'tags', 'total_episodes', 'type', 'youtube_id']

sessiondecorator = alrequests.session_decorator_factory(useragent=True, referrer="http://anichart.net")

def checkcsrf(func):
    """ Decorator for functions that require anichart's csrf token ("X-CSRF-TOKEN"; i.e.- API calls) """
    @functools.wraps(func)
    @sessiondecorator
    def wrapper(*args, session=None, **kw):
        if "X-CSRF-TOKEN" not in session.cookies:
            getcsrf(session)
        return func(*args, session=session, **kw)
    return wrapper

def getcsrf(session):
    """ Adds a csrf token to the session """
    session.get("http://anichart.net")

class Show(dict):
    """ A simple container used to facilitate cleaning API data """
    def __hash__(self):
        return self['id']
    def serialize(self):
        out = copy.deepcopy(self)
        if out.get("airing"):
            out['airing'] = out['airing']['time'].strftime("%I:%M %p")
        return out

@checkcsrf
def getshowsbyseason(season, year, session=None):
    """ Queries the API for the given season-year and returns the API information """
    url = APIURL.format(season=season, year=year)
    headers = {"X-CSRF-TOKEN": session.cookies['X-CSRF-TOKEN']}
    data = alrequests.GET_json(url, session=session, headers=headers)
    return {cat: [Show(**show) for show in shows] for cat, shows in data.items()}

def consolidate_data(data):
    """ Consolidates the API data into a single list

    The original root value (i.e.- tv, leftovers) is added as the "category" key and title()'d.
    """
    out = list()
    ## API data is organized in {category (tv,tvshort,movie,etc.): [list of show dicts]}
    for cat, shows in data.items():
        for show in shows:
            show['category'] = cat.title()
            out.append(show)
    return out

def test_rawdata(data):
    """ Checks if the data has been consolidated, returning True if it has not, otherwise False """
    base = list(data)[0]
    if base in ["tv", "leftovers", "tv short", "movie", "OVA / ONA / Special"]:
        return True
    return False

def check_rawdata(data):
    """ Checks if the data has been consolidated using test_rawdata; if it hasn't, this function will consolidate the data """
    if test_rawdata(data):
        return consolidate_data(data)
    return data

def fixstartdate(startdate):
    """ Converts startdate default "yyyymmdd" to "dd/mm/yyyy"

    If startdate is falsey, returns a default value of "01/01/2017"
    """
    if not startdate:
        return "01/01/2017"
    s = str(startdate)
    d, m, y = [max(dt, 1) for dt in [int(s[6:8]), int(s[4:6]), int(s[:4])]]
    return f"{d:0>2}/{m:0>2}/{y:0>4}"

def getseason(data):
    """ Tries to determine the season that an anime aired based on its season value and its rankings """
    ## Season key is the most reliable
    season = data.get("season")
    if season:
        ## Season key is an integer formatted "YYS" and is 2000-based (i.e.- 171 == 2017-Winter)
        season = str(season)
        year = int(f"20{season[:2]}")
        ## Anichart Season key is 1-indexed
        season = int(season[2]) - 1
        ## This should normally pass; if it consistently does not, we'll have to investigate why
        try:
            return SeasonCharts.buildseason(season, year)
        ## If something goes wrong, we'll try another method
        except Exception:
            print(f"Failed to parse season: {data['season']}")
    ## Next, we'll iterate over rankings to try to determine the season/year
    ## There are multiple types of rankings based on season, year, and both combined,
    ## so we'll piece it together based on whatever we come across first
    season, year = None, None
    for ranking in data.get("rankings", list()):
        ## Quicker exit (without just making this loop its own function)
        if season and year:
            continue
        ## We'll ignore stuff we've already gotten and assume that nothing in
        ## rankings contradicts each other
        if not season:
            ## Defaults to None one way or another if it's not supplied
            season = ranking.get("season")
        if not year:
            year = ranking.get("year")
    ## Check if we made it
    if season and year:
        ## As above, this should always work out-of-the-box
        try:
            return SeasonCharts.buildseason(season, year)
        except Exception:
            print(season, year)
    ## Welp, we're stumped...
    return None

def overwrite_season(function):
    """ A wrapper to allow a function to accept a season and overwrite all data entries with the given season using replace_season

    If season is provided, it must be a string that conforms to the standard Season format found in SeasonCharts.
    The season parameter accepted by this decorator will NOT be passed on to the function.
    season is updated on the output, but in many cases will mutate the input as well due to the way this module handles data.
    """
    @functools.wraps(function)
    def inner(*args, season=None, **kw):
        data = function(*args, **kw)
        if season:
            replace_season(data, season)
        return data
    return inner

def replace_season(data, season):
    """ Replaces the season value of a list of Show objects or data dicts

    Mutates the objects in-place.
    """
    if not SeasonCharts.matchseason(season):
        raise SeasonCharts.SeasonError
    ## Check data format
    if test_rawdata(data):
        for cat, shows in data.items():
            for show in shows:
                show['season'] = season
    else:
        for show in data:
            show['season'] = season

@overwrite_season
def cleanapidata_csv(data):
    """ Cleans the API data for use in a CSV and returns a list of headers along with the clean data.

    Converts startdate to EST startdate. Adds firstepisode date. Adds EST_airing time.
    Features overwrite_season decorator.
    Returns ([list of Headers (strs)], [list of Shows (dicts)])
    """
    out = list()
    siteheaders = []
    data = check_rawdata(data)
    for show in data:
        if show.get("airing"):
            ## Convert airing to datetime for utility
            show['airing']['time'] = datetime.datetime.fromtimestamp(show['airing']['time']).replace(tzinfo=web.JST)
        ## Format start_date to Day/Month/Year
        if show.get("start_date"):
            ## start_date is an integer representing yyyymmdd
            startdate = fixstartdate(show['start_date'])
            startdt = None
            ## Test that start_date is a valid date
            try:
                startdt = datetime.datetime.strptime(startdate, "%d/%m/%Y")
            except ValueError:
                ## If not, use "airing"
                if show.get("airing"):
                    ## If we're multiple weeks in, compensate (next_episode = 1 means we're on the first week)
                    startdt = show['airing']['time'] - datetime.timedelta(weeks=show['airing']['next_episode'] - 1)
                ## If "airing" doesn't work, the month and year are normally correct
                ## so we'll just replace the day
                else:
                    s = str(show['start_date'])
                    startdate = f"{s[4:6]}/01/{s[:4]}"
            ## If we ended up using a dt object, convert it back out
            if startdt:
                startdate = startdt.strftime("%d/%m/%Y")
            ## Set clean data
            show['start_date'] = startdate
        ## Remove breaks from "description" (Excel isn't escaping newline characters)
        if show.get('description'):
            ## <br> characters sometimes sneak in as well
            show['description'] = show['description'].replace("\n", " ").replace("<br>", "")
        ## Convert studio from object dict to simply name
        if show.get("studio"):
            show['studio'] = show['studio']['name']
        ## Convert "tags" from object dicts to a string of names, and remove all spoiler tags
        show['oldtags'] = []
        if show.get('tags'):
            show['tags'] = ", ".join(sorted([tag['name'] for tag in show['tags'] if not tag['spoiler']]))
        ## If "tags" list is empty, replace it with empty string
        else:
            show['tags'] = ""
        ## Convert "youtube_id" to url
        if show.get('youtube_id'):
            show['youtube_id'] = YOUTUBELINK.format(youtube_id=show["youtube_id"])
        ######### Generated Data
        ## Set the first episode's date (used for sorting purposes)
        show['first_episode'] = None
        ## Requires "airing" and "start_date"
        if show.get('airing') and show.get('start_date'):
            ## Create full datetime
            airtime = f"{show['airing']['time'].strftime('%H:%M')} {show['start_date']}"
            dt = datetime.datetime.strptime(airtime, "%H:%M %d/%m/%Y")
            ## Convert to EST
            dt = dt.replace(tzinfo=web.JST).astimezone(web.EST)
            show['first_episode'] = dt.strftime("%d/%m/%Y")
        ## Airing time in EST
        show['EST_airing'] = ""
        ## Use "airing" to get the airtime in EST as HH:MM
        if show.get('airing'):
            dt = show['airing']['time'].astimezone(web.EST)
            ## Set clean data
            show['EST_airing'] = dt.strftime("%H:%M")
        ## Convert sites in "external_links" to their own header
        ## There may be multiple sites in each site category
        ## (Official Site, Twitter, etc.), so we'll keep track
        ## of duplicates locally using enumeration and at the
        ## method scope using "siteheaders"
        ## Get a master list of external site names
        sitenames = list(set([site['site'] for site in show['external_links']]))
        ## For each unique site name
        for name in sitenames:
            ## Collect all occurrences
            count = [site for site in show['external_links'] if site['site'] == name]
            ## Enumerate so we can create additional, unique headers as necessary
            ## Example Headers: Twitter[, Twitter 2, Twitter 3, ...]
            for i, site in enumerate(count, start=1):
                ## The first occurrence simply goes by the category of site
                if i == 1:
                    duplicatename = name
                ## Otherwise append the occurrence count
                else:
                    duplicatename = f"{name} {i}"
                ## Keep track at the method level so that we can
                ## output data correctly
                if duplicatename not in siteheaders:
                    siteheaders.append(duplicatename)
                ## Add to show dict
                show[duplicatename] = site['url']
        ## Remove "external_links" because it is now redundant
        del show['external_links']
        out.append(show)
    headers = list(APIDATAHEADERS)
    ## Added during cleaning and updating
    headers.insert(0, 'category')
    headers.insert(4, "first_episode")
    headers.insert(5, "EST_airing")
    headers.extend(sorted(siteheaders))
    ## Removed during cleaning and updating
    headers.remove("external_links")
    return headers, out

def outputapidata_csv(filename, data, headers=None):
    """ Creates a CSV file with filename using data and headers (if supplied) """
    with open(filename, 'w', encoding='utf-8', newline="") as f:
        if headers:
            writer = csv.DictWriter(f, fieldnames=headers)
            writer.writeheader()
        else:
            ## DictWriter requires fieldnames, so fall back to the keys of the first row
            writer = csv.DictWriter(f, fieldnames=list(data[0]))
        writer.writerows(data)

def serializeshows(file, shows):
    """ Creates a json file containing the shows """
    with open(file, 'w', encoding='utf-8') as f:
        json.dump([show.serialize() for show in shows], f)

def convertshowstostandard(data, season=None, showfactory=SeasonCharts.Show):
    """ Converts a full collection of API data to a list of standard Show Objects (via converttostandard)

    If season is provided, replace_season will be called before converting.
    """
    data = check_rawdata(data)
    out = list()
    if season:
        replace_season(data, season)
    for show in data:
        out.append(converttostandard(show, showfactory=showfactory))
    return out

def converttostandard(show, showfactory=SeasonCharts.Show):
    """ Converts an AniChart Show to a standard Show Object """
    if not isinstance(show, Show):
        raise TypeError("converttostandard requires an AniChart Show Object.")
    chartsource = [(CHARTNAME, show['id']), ]
    if show.get("season") is None or not SeasonCharts.matchseason(show.get("season")):
        show['season'] = getseason(show)
    season = show['season']
    japanese_title = show['title_japanese']
    romaji_title = show['title_romaji']
    english_title = show['title_english']
    additional_titles = show['synonyms']
    medium = show['type']
    continuing = show['category'].lower() == "leftovers"
    summary = f"(From {CHARTNAME})\n{show.get('description')}"
    tags = []
    for tag in show['tags']:
        tags.append((tag['name'], tag['spoiler']))
    for genre in show['genres']:
        ## AniChart sometimes generates empty-string Genres
        if genre:
            tags.append((genre, False))
    airingtime = show.get('airing')
    if not airingtime:
        airingtime = datetime.datetime(1, 1, 1)
    else:
        airingtime = datetime.datetime.fromtimestamp(airingtime['time'])
    startdate = fixstartdate(show.get("start_date"))
    startdate = f"{startdate} {airingtime.strftime('%H:%M')} +0900"
    episodes = show['total_episodes']
    images = [show['image'], ]
    studios = []
    if show.get('studio'):
        studios = [show['studio']['name'], ]
    links = []
    if show.get('youtube_id'):
        links.append(("Youtube", YOUTUBELINK.format(youtube_id=show["youtube_id"])))
    if show.get("anilist_link"):
        links.append(("Anilist", show['anilist_link']))
    if show.get("mal_link"):
        links.append(("MAL", show['mal_link']))
    for link in show.get("external_links", list()):
        links.append((link["site"], link["url"]))
    return showfactory(chartsource=chartsource, season=season, japanese_title=japanese_title,
                       romaji_title=romaji_title, english_title=english_title,
                       additional_titles=additional_titles, medium=medium, continuing=continuing,
                       summary=summary, tags=tags, startdate=startdate, episodes=episodes,
                       images=images, studios=studios, links=links)

LINKRE = re.compile(r"""(?P<name>(?:\w| )+?)(?:\s+\d+|$)""")
def checklink(key, value):
    """ Checks if a key,value pair is a website link, and strips any enumeration created by cleanapidata_csv """
    try:
        if not value.startswith(("http", "www")):
            return False, False
    ## Value is not a string, so it can't be a website link
    except AttributeError:
        return False, False
    linkresearch = LINKRE.search(key)
    ## In normal practice this really shouldn't happen :-/
    if not linkresearch:
        return False, False
    return linkresearch.group("name"), value

if __name__ == "__main__":
    season, year = "Fall", 2017
    print("getting data")
    data = getshowsbyseason(season=season, year=year)
    data = consolidate_data(data)
    with open("data-anichart.json", 'w', encoding='utf-8') as f:
        json.dump(data, f)
    with open("data-anichart.json", 'r', encoding='utf-8') as f:
        data = json.load(f)
    replace_season(data, f"{season}-{year}")
    data = [Show(**show) for show in data]
    #print("cleaning data")
    #headers,shows = cleanapidata_csv(data, season = SeasonCharts.buildseason(season,year))
    #print("serializing")
    #serializeshows("output-anichart.json",shows)
    print("Converting to Standard")
    print(convertshowstostandard(data))
    print('done')
runIPRscan.py
#!/usr/bin/env python
# $Id: iprscan5_urllib2.py 2809 2015-03-13 16:10:25Z uludag $
# ======================================================================
#
# Copyright 2009-2014 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
# InterProScan 5 (REST) Python client, ported from urllib2 to Python 3's
# urllib, using xmltramp (http://www.aaronsw.com/2002/xmltramp/).
#
# See:
# http://www.ebi.ac.uk/Tools/webservices/services/pfa/iprscan5_rest
# http://www.ebi.ac.uk/Tools/webservices/tutorials/python
# ======================================================================
# Load libraries
import urllib.request, urllib.error, urllib.parse
import time
import sys
import re
import os
import platform
import argparse
import xmltramp

# Base URL for service
baseUrl = 'http://www.ebi.ac.uk/Tools/services/rest/iprscan5'

# Set interval for checking status
checkInterval = 10
# Output level
outputLevel = 1
# Debug level
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)

# Command-line options
parser = argparse.ArgumentParser()
# Tool specific options
parser.add_argument('--input', required=True, help='input FASTA file')
parser.add_argument('--appl', help='signature methods to use, see --paramDetail appl')
parser.add_argument('--crc', action="store_true", help='enable InterProScan Matches look-up (ignored)')
parser.add_argument('--nocrc', action="store_true", help='disable InterProScan Matches look-up (ignored)')
parser.add_argument('--goterms', action="store_true", help='enable inclusion of GO terms')
parser.add_argument('--nogoterms', action="store_true", help='disable inclusion of GO terms')
parser.add_argument('--pathways', action="store_true", help='enable inclusion of pathway terms')
parser.add_argument('--nopathways', action="store_true", help='disable inclusion of pathway terms')
parser.add_argument('--sequence', help='input sequence file name')
# General options
parser.add_argument('--email', required=True, help='e-mail address')
parser.add_argument('--title', help='job title')
parser.add_argument('--outfile', help='file name for results')
parser.add_argument('--outformat', help='output format for results')
# 'async' is a reserved word in Python 3.7+, so the flag is stored as 'async_'.
parser.add_argument('--async', dest='async_', action='store_true', help='asynchronous mode')
parser.add_argument('--jobid', help='job identifier')
parser.add_argument('--polljob', action="store_true", help='get job result')
parser.add_argument('--status', action="store_true", help='get job status')
parser.add_argument('--resultTypes', action='store_true', help='get result types')
parser.add_argument('--params', action='store_true', help='list input parameters')
parser.add_argument('--paramDetail', help='get details for parameter')
parser.add_argument('--quiet', action='store_true', help='decrease output level')
parser.add_argument('--verbose', action='store_true', help='increase output level')
parser.add_argument('--baseURL', default=baseUrl, help='Base URL for service')
parser.add_argument('--debugLevel', type=int, default=debugLevel, help='debug output level')
options = parser.parse_args()

# Increase output level
if options.verbose:
    outputLevel += 1
# Decrease output level
if options.quiet:
    outputLevel -= 1
# Debug level
if options.debugLevel:
    debugLevel = options.debugLevel
# Honour a user-supplied base URL (defaults to the EBI endpoint above).
baseUrl = options.baseURL

# Debug print
def printDebugMessage(functionName, message, level):
    if (level <= debugLevel):
        print('[' + functionName + '] ' + message, file=sys.stderr)

# User-agent for request (see RFC2616).
def getUserAgent():
    printDebugMessage('getUserAgent', 'Begin', 11)
    # Agent string for the urllib library.
    urllib_agent = 'Python-urllib/%s' % urllib.request.__version__
    clientRevision = '$Revision: 2809 $'
    clientVersion = '0'
    if len(clientRevision) > 11:
        clientVersion = clientRevision[11:-2]
    # Prepend client specific agent string.
    user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
        clientVersion, os.path.basename(__file__),
        platform.python_version(), platform.system(),
        urllib_agent
    )
    printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
    printDebugMessage('getUserAgent', 'End', 11)
    return user_agent

# Wrapper for a REST (HTTP GET) request
def restRequest(url):
    printDebugMessage('restRequest', 'Begin', 11)
    printDebugMessage('restRequest', 'url: ' + url, 11)
    # Errors are indicated by HTTP status codes.
    try:
        # Set the User-agent.
        user_agent = getUserAgent()
        http_headers = {'User-Agent': user_agent}
        req = urllib.request.Request(url, None, http_headers)
        # Make the request (HTTP GET).
        reqH = urllib.request.urlopen(req)
        # urlopen() returns bytes in Python 3; decode before string handling.
        result = reqH.read().decode('utf-8')
        reqH.close()
    except urllib.error.HTTPError as ex:
        # Trap exception and output the document to get the error message.
        print(ex.read(), file=sys.stderr)
        raise
    printDebugMessage('restRequest', 'End', 11)
    return result

# Get input parameters list
def serviceGetParameters():
    printDebugMessage('serviceGetParameters', 'Begin', 1)
    requestUrl = baseUrl + '/parameters'
    printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetParameters', 'End', 1)
    return doc['id':]

# Print list of parameters
def printGetParameters():
    printDebugMessage('printGetParameters', 'Begin', 1)
    idList = serviceGetParameters()
    for id in idList:
        print(id)
    printDebugMessage('printGetParameters', 'End', 1)

# Get input parameter information
def serviceGetParameterDetails(paramName):
    printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
    printDebugMessage('serviceGetParameterDetails', 'paramName: ' + paramName, 2)
    requestUrl = baseUrl + '/parameterdetails/' + paramName
    printDebugMessage('serviceGetParameterDetails', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetParameterDetails', 'End', 1)
    return doc

# Print description of a parameter
def printGetParameterDetails(paramName):
    printDebugMessage('printGetParameterDetails', 'Begin', 1)
    doc = serviceGetParameterDetails(paramName)
    print(str(doc.name) + "\t" + str(doc.type))
    print(doc.description)
    for value in doc.values:
        print(value.value, end=' ')
        if str(value.defaultValue) == 'true':
            print('default', end=' ')
        print()
        print("\t" + str(value.label))
        if (hasattr(value, 'properties')):
            for wsProperty in value.properties:
                print("\t" + str(wsProperty.key) + "\t" + str(wsProperty.value))
    # print(doc)
    printDebugMessage('printGetParameterDetails', 'End', 1)

# Submit job
def serviceRun(email, title, params):
    printDebugMessage('serviceRun', 'Begin', 1)
    # Insert e-mail and title into params
    params['email'] = email
    if title:
        params['title'] = title
    requestUrl = baseUrl + '/run/'
    printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
    # Signature methods require special handling (list)
    applData = ''
    if 'appl' in params:
        # So extract from params
        applList = params['appl']
        del params['appl']
        # Build the method data options
        for appl in applList:
            applData += '&appl=' + appl
    # Get the data for the other options
    requestData = urllib.parse.urlencode(params)
    # Concatenate the two parts.
    requestData += applData
    printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
    # Errors are indicated by HTTP status codes.
    try:
        # Set the HTTP User-agent.
        user_agent = getUserAgent()
        http_headers = {'User-Agent': user_agent}
        req = urllib.request.Request(requestUrl, None, http_headers)
        # Make the submission (HTTP POST); the request body must be bytes.
        reqH = urllib.request.urlopen(req, requestData.encode('utf-8'))
        jobId = reqH.read().decode('utf-8')
        reqH.close()
    except urllib.error.HTTPError as ex:
        # Trap exception and output the document to get the error message.
        print(ex.read(), file=sys.stderr)
        raise
    printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
    printDebugMessage('serviceRun', 'End', 1)
    return jobId

# Get job status
def serviceGetStatus(jobId):
    printDebugMessage('serviceGetStatus', 'Begin', 1)
    printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
    requestUrl = baseUrl + '/status/' + jobId
    printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
    status = restRequest(requestUrl)
    printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
    printDebugMessage('serviceGetStatus', 'End', 1)
    return status

# Print the status of a job
def printGetStatus(jobId):
    printDebugMessage('printGetStatus', 'Begin', 1)
    status = serviceGetStatus(jobId)
    print(status)
    printDebugMessage('printGetStatus', 'End', 1)

# Get available result types for job
def serviceGetResultTypes(jobId):
    printDebugMessage('serviceGetResultTypes', 'Begin', 1)
    printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
    requestUrl = baseUrl + '/resulttypes/' + jobId
    printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetResultTypes', 'End', 1)
    return doc['type':]

# Print list of available result types for a job.
def printGetResultTypes(jobId):
    printDebugMessage('printGetResultTypes', 'Begin', 1)
    resultTypeList = serviceGetResultTypes(jobId)
    for resultType in resultTypeList:
        print(resultType['identifier'])
        if (hasattr(resultType, 'label')):
            print("\t", resultType['label'])
        if (hasattr(resultType, 'description')):
            print("\t", resultType['description'])
        if (hasattr(resultType, 'mediaType')):
            print("\t", resultType['mediaType'])
        if (hasattr(resultType, 'fileSuffix')):
            print("\t", resultType['fileSuffix'])
    printDebugMessage('printGetResultTypes', 'End', 1)

# Get result
def serviceGetResult(jobId, type_):
    printDebugMessage('serviceGetResult', 'Begin', 1)
    printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
    printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
    requestUrl = baseUrl + '/result/' + jobId + '/' + type_
    result = restRequest(requestUrl)
    printDebugMessage('serviceGetResult', 'End', 1)
    return result

# Client-side poll
def clientPoll(jobId):
    printDebugMessage('clientPoll', 'Begin', 1)
    result = 'PENDING'
    while result == 'RUNNING' or result == 'PENDING':
        result = serviceGetStatus(jobId)
        print(result, file=sys.stderr)
        if result == 'RUNNING' or result == 'PENDING':
            time.sleep(checkInterval)
    printDebugMessage('clientPoll', 'End', 1)

# Get result for a jobid
def getResult(jobId):
    printDebugMessage('getResult', 'Begin', 1)
    printDebugMessage('getResult', 'jobId: ' + jobId, 1)
    # Check status and wait if necessary
    clientPoll(jobId)
    # Get available result types
    resultTypes = serviceGetResultTypes(jobId)
    for resultType in resultTypes:
        # Derive the filename for the result
        if options.outfile:
            filename = options.outfile + '.' + \
                str(resultType['identifier']) + '.' + \
                str(resultType['fileSuffix'])
        else:
            filename = jobId + '.' + \
                str(resultType['identifier']) + '.' + \
                str(resultType['fileSuffix'])
        # Write a result file
        if not options.outformat or options.outformat == str(resultType['identifier']):
            # Get the result (restRequest() already decodes to text)
            result = serviceGetResult(jobId, str(resultType['identifier']))
            fh = open(filename, 'w')
            fh.write(result)
            fh.close()
            print(filename)
    printDebugMessage('getResult', 'End', 1)

# Read a file
def readFile(filename):
    printDebugMessage('readFile', 'Begin', 1)
    fh = open(filename, 'r')
    data = fh.read()
    fh.close()
    printDebugMessage('readFile', 'End', 1)
    return data

# No options... print help.
if numOpts < 2:
    parser.print_help()
# List parameters
elif options.params:
    printGetParameters()
# Get parameter details
elif options.paramDetail:
    printGetParameterDetails(options.paramDetail)
# Submit job
elif options.email and not options.jobid:
    params = {}
    if options.input:  # always true: --input is a required argument
        if os.access(options.input, os.R_OK):  # Read file into content
        else:  # Argument is a sequence id
            params['sequence'] = options.input
    elif options.sequence:  # Specified via option
        if os.access(options.sequence, os.R_OK):  # Read file into content
            params['sequence'] = readFile(options.sequence)
        else:  # Argument is a sequence id
            params['sequence'] = options.sequence
    # Map flag options to boolean values.
    # if options.crc:
    #     params['crc'] = True
    # elif options.nocrc:
    #     params['crc'] = False
    if options.goterms:
        params['goterms'] = True
    elif options.nogoterms:
        params['goterms'] = False
    if options.pathways:
        params['pathways'] = True
    elif options.nopathways:
        params['pathways'] = False
    # Add the other options (if defined)
    if options.appl:
        params['appl'] = re.split('[ \t\n,;]+', options.appl)
    # Submit the job
    jobid = serviceRun(options.email, options.title, params)
    if options.async_:  # Async mode
        print(jobid)
    else:  # Sync mode
        print(jobid, file=sys.stderr)
        time.sleep(5)
        getResult(jobid)
# Get job status
elif options.status and options.jobid:
    printGetStatus(options.jobid)
# List result types for job
elif options.resultTypes and options.jobid:
    printGetResultTypes(options.jobid)
# Get results for job
elif options.polljob and options.jobid:
    getResult(options.jobid)
else:
    print('Error: unrecognised argument combination', file=sys.stderr)
    parser.print_help()
params['sequence'] = readFile(options.input)
conditional_block
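One porting detail in the client above deserves a call-out: the flag cannot be read back as options.async, because async became a reserved word in Python 3.7. A minimal, self-contained sketch of the argparse dest= workaround the code relies on:

import argparse

p = argparse.ArgumentParser()
# Keep the public flag name '--async' but store it under a safe attribute name.
p.add_argument('--async', dest='async_', action='store_true', help='asynchronous mode')
opts = p.parse_args(['--async'])
print(opts.async_)  # True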
main.py
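main.py below drives the whole game from plain dictionaries: each room maps direction constants to destination room names, so movement is a single dict lookup (see moveDirection() further down). A self-contained sketch of that pattern, with invented room names:

# Rooms as dicts keyed by direction; movement is one lookup.
rooms = {
    'Hall':    {'north': 'Kitchen'},
    'Kitchen': {'south': 'Hall'},
}
location = 'Hall'

def move(direction):
    global location
    if direction in rooms[location]:
        location = rooms[location][direction]
        print('You move to the %s.' % location)
    else:
        print('You cannot move in that direction')

move('north')  # You move to the Kitchen.
move('west')   # You cannot move in that direction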
#!/usr/bin/python # __ _ _ # /__\__ ___ __ _ _ __ ___ | |_| |__ ___ /\ /\___ _ _ ___ ___ # /_\/ __|/ __/ _` | '_ \ / _ \ | __| '_ \ / _ \ / /_/ / _ \| | | / __|/ _ \ # //__\__ \ (_| (_| | |_) | __/ | |_| | | | __/ / __ / (_) | |_| \__ \ __/ # \__/|___/\___\__,_| .__/ \___| \__|_| |_|\___| \/ /_/ \___/ \__,_|___/\___| # |_| # Modified By Sam Scotford # Credits to Al Sweigart for bulk of sorce code # - https://inventwithpython.com/blog/2014/12/11/making-a-text-adventure-game-with-the-cmd-and-textwrap-python-modules/ # # To-Do: # Have a lighting system like in Zork, where the player must have a torch to enter certain rooms # Have some combat # Items within items needs fixing / implementing # Fix bug where trying to store items outside of the mainhall produces an error # Ui Improvements import _random import datetime #import player_class # Player class external import os import cmd import textwrap import colorama import ctypes import platform # Output colors colorama.init() # Set Window title #ctypes.windll.kernel32.SetConsoleTitleA("Escape The House v5.0.9") class bcolors: start = "\033[1;31m" end = "\033[0;0m" # Constent variables DEFAULT = '' DESC = 'desc' NORTH = 'north' SOUTH = 'south' EAST = 'east' WEST = 'west' UP = 'up' DOWN = 'down' GROUND = 'ground' STORAGE = 'storage' DOOR = 'door' HIDDEN = 'hidden' GROUNDDESC = 'grounddesc' INVDESC = 'invdesc' SHORTDESC = 'shortdesc' LONGDESC = 'longdesc' TAKEABLE = 'takeable' STORABLE = 'storeable' # able to be stored in a bag / items inventory STORAGE = 'storage' STORAGEDESC = 'storagedesc' EDIBLE = 'edible' USEABLE = 'usable' TOGGLE = 'TOGGLE' USEDESCTRUE = 'usedesctrue' USEDESCFALSE = 'usedescfalse' ITEMINV = 'iteminv' ITEM_INVENTORY = [] DESCWORDS = 'descwords' SCREEN_WIDTH = 80 location = 'Entrance Hall' # Start here inventory = ['Personal ID'] showFullExits = False # Define rooms here worldRooms = { 'Entrance Hall': { DESC: "You stand in a hallway with a single door infront of you, the room is dimly lit from a single bulb that flickers occosionally.", NORTH: 'Main Hall', GROUND: ['Old Key', 'Note', 'Sack'], }, 'Main Hall': { DESC: 'You stand in the main hall there are debris everywhere you look it looks a bomb went off in here, there is also a small troll statue on the side table.', SOUTH: 'Entrance Hall', EAST: 'Kitchen', WEST: 'Library', UP: '2nd Floor', GROUND: ['Torch', 'Chest', 'Troll'], STORAGE: [], # Used to store items and win the game }, 'Kitchen' : { DESC: "You stand in what appears to be a large kitchen, pots and pans hang from the walls and though dusty look quite well used.", SOUTH: 'Main Hall', EAST: 'Garden', GROUND: '', }, 'Garden' : { DESC: "You are in an overgrown garden, a prestine lawn-mower lies in the undergrown somewhat ironically, a tree house can be seen in the distance", WEST: "Kitchen", UP: "Treehouse", GROUND: '', }, "Treehouse" : { DESC: "With much effort on your part and that of the rather brittle ladders, you make it to the top, a strong breeze shakes the tree and you being to feel rather ill.", NORTH: "Flimsy Branch", DOWN: "Garden", GROUND: "", }, 'Library': { DESC: "You walk into the Library, the smell of dusty books invades your nostrils. 
There is a valuted ceiling here and the moonlight shines into the room through stained glass windows making dancing shapes on the bookcases.", EAST: 'Main Hall', GROUND: ['Dusty Books', 'Gun'], }, 'Library 2nd Floor': { DESC : "You pass thorugh a creaky door on your way to the second floor libary.", EAST : "2nd Floor", GROUND : "", }, '2nd Floor' : { DESC: "You go up the spiraling staircase onto the second floor, the first was blocked by an impassable door, you look down and have sudden virtigo from the height, you turn around.", DOWN: 'Main Hall', WEST : 'Library 2nd Floor', EAST : 'Bedroom', UP : 'Attic', GROUND:['Cassette'] }, 'Attic' : { DESC : "Above you is an old wooden hatch with a brass ring, using your feeble legs you give up trying to reach for it and instead grab a hook from beside you and leaver the creaky door open, about a million weevels fall from the opening. You climb inside.", DOWN : "2nd Floor", UP: "Roof", GROUND : ['Weevels'] } } # Define items in the world worldItems = { 'Old Key': { GROUNDDESC: 'A dull brass key lies on the ground here.', SHORTDESC: 'Old key', LONGDESC: 'The old key has intricate inscriptions on it, it is covered in dust from years of unuse, it is cool to the touch.', TAKEABLE: True, EDIBLE: False, USEABLE: True, DESCWORDS: ['old', 'key'], STORAGEDESC: '[The old key] is within the chest, its like leaving the car keys in the in the car and regretting ever being born, good job..' }, 'Personal ID': { GROUNDDESC: 'Your personal identification card lies on the floor.', SHORTDESC: 'Your ID card.', LONGDESC: 'Through much mental anguish you read your card, You are Henry Burton, your age is 34 and have no idea of where you are.', TAKEABLE: True, DESCWORDS: ['id', 'card'], EDIBLE: False, USEABLE: False, STORAGEDESC: '[ID] Not even the blackness of the chest can hide your ugly mug, not even the weevles will go near it.', }, 'Note': { GROUNDDESC: 'A note lies here, its hard to make out what it says from where you are', SHORTDESC: 'Note', LONGDESC: 'Its a note and it reads "There was no mailbox outside to leave this in so I just left it here.."', TAKEABLE: True, EDIBLE: True, USEABLE: True, USEDESCTRUE: 'You use the note, and wipe your snotty nose with it, now you have a snotty note, gross.', DESCWORDS: ['note'], STORAGEDESC : '[Note] Not sure when you want this in here, but it is regardless..' }, 'Torch': { GROUNDDESC: 'A typical torch / flashlight lies on the floor', SHORTDESC: 'Torch / Flashlight.', LONGDESC: 'The torch / flashlight emits a soft flow, just enough to light the way.', EDIBLE: False, USEABLE: True, TOGGLE: False, USEDESCTRUE: 'You click the top of the touch, a beam illuminates your way, you can now be lost and see what you are doing.', USEDESCFALSE: 'YOu click the top of the touch, you think "Hey who turned out the lights.", the torch stays on, you bust the switch with your meaty fingers.. good job.', DESCWORDS: ['torch', 'flashlight'], STORAGEDESC: '[Torch] In the blackness of the chest not even the torches light esacpes.. spooky.', }, 'Dusty Books': { GROUNDDESC: 'A pile of dusty books lies on the floor near a reading desk.', SHORTDESC: 'Dusty Books', LONGDESC: 'A pile of dusty old books pages half rotting away, its hard to make out what is written in them, Hitchickers Guide to the Galaxy, How to stew a ham in 43 different ways and various other, written, human detritus.', EDIBLE: False, USEABLE: False, DESCWORDS: ['books','book'], STORAGEDESC: '[Dusty Books] The books lie at the bottom of the chest looking miserable.' 
    'Gun': {
        GROUNDDESC: 'A gun lies on the floor here.',
        SHORTDESC: 'Gun',
        LONGDESC: 'A .32 ACP revolver, it has 5 chambers, one of the cartridges has been fired.',
        EDIBLE: False,
        USEABLE: True,
        DESCWORDS: ['Gun', 'gun', 'revolver'],
        STORAGEDESC: '[Gun] Better the gun be in here than in my hands..',
    },
    'Sack': {
        GROUNDDESC: 'A burlap sack lies on the floor here.',
        SHORTDESC: 'Sack',
        LONGDESC: "It's an old sack used for storing things in, it smells like onions.",
        EDIBLE: False,
        DESCWORDS: ['Sack', 'bag', 'sack'],
        STORAGEDESC: "[Sack] A container within a container, it's like that terrible movie with Leonardo DiCaprio..",
        # Attempting "Items within Items"
        ITEMINV: ['Lunch'],
    },
    'Chest': {
        SHORTDESC: 'A wooden chest',
        GROUNDDESC: 'A wooden chest resides in the far corner of this room with an inscription on it.',
        LONGDESC: "It's an old wooden chest with the inscription \"Por viaj malmolaj gajnitaj eroj.\", the language begins with an Esp... you know that much.",
        EDIBLE: False,
        TAKEABLE: False,
        USEABLE: False,
        DESCWORDS: ['Chest', 'Box', 'Crate', 'chest', 'box', 'crate'],
    },
    'Troll': {
        SHORTDESC: 'A troll figure',
        GROUNDDESC: 'A troll is somewhere around here.',
        LONGDESC: 'A small troll figure carved from wood, you turn it over in your hands, an inscription on the base reads "RIP Inbag the Troll.", a disembodied Scottish voice tells you to not put it in your bag.',
        EDIBLE: False,
        USEABLE: False,
        TAKEABLE: False,
        DESCWORDS: ['Troll', 'troll', 'figure', 'statue'],
        STORAGEDESC: "[Troll] The troll lies disgruntled in the chest, it's dark in there, it might be eaten by a grue.",
    },
    'Cassette': {
        SHORTDESC: 'A cassette tape',
        GROUNDDESC: 'A cassette tape lies here on the floor, someone must have "dropped the bass".',
        LONGDESC: 'You turn the cassette tape over in your hands, the label reads "Best of the 60s", it possibly contains Fleetwood Mac and thus must be destroyed immediately.',
        EDIBLE: False,
        USEABLE: False,
        TAKEABLE: True,
        DESCWORDS: ['tape', 'cassette', 'music tape', 'music'],
        STORAGEDESC: '[Tape] A tape lies in the bottom of the chest, we would have preferred you to burn it but this choice is yours.',
    },
    'Weevils': {
        SHORTDESC: 'A pile of dead weevils',
        GROUNDDESC: 'A pile of rotting weevils lies on the ground.',
        LONGDESC: "It's a pile of fucking rotting weevils.",
        EDIBLE: True,
        USEABLE: False,
        TAKEABLE: True,
        DESCWORDS: ['weevils', 'pile of weevils', 'rotting weevils'],
        STORAGEDESC: '[Weevils] A pile of rotting weevil husks lie at the bottom of the chest.',
    },
}


def displayLocation(loc):
    """A helper function for displaying an area's description and exits."""
    # Print the room name.
    print(bcolors.start + loc + bcolors.end)
    print('=' * len(loc))

    # Print the room's description (using textwrap.wrap())
    print('\n'.join(textwrap.wrap(worldRooms[loc][DESC], SCREEN_WIDTH)))

    # Print all the items on the ground.
    if len(worldRooms[loc][GROUND]) > 0:
        print("")
        for item in worldRooms[loc][GROUND]:
            print(worldItems[item][GROUNDDESC])

    # Print the stored items, if this room has a storage area (only the Main
    # Hall does). Previously a KeyError in rooms without STORAGE returned
    # early and skipped printing the exits.
    if STORAGE in worldRooms[loc] and len(worldRooms[loc][STORAGE]) > 0:
        print(bcolors.start + "The treasures you have accrued thus far are (Chest) :" + bcolors.end)
        for item in worldRooms[loc][STORAGE]:
            print(worldItems[item][STORAGEDESC])

    # Print all the exits.
    exits = []
    for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
        if direction in worldRooms[loc]:
            exits.append(direction.title())
    print("")
    if showFullExits:
        for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
            if direction in worldRooms[loc]:
                print('%s: %s' % (direction.title(), worldRooms[loc][direction]))
    else:
        print('Exits: %s' % ' '.join(exits))
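# displayLocation() is also what a bare "look" command calls; the storage
# listing only appears in rooms that define a STORAGE list, i.e. the Main Hall.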
def moveDirection(direction):
    """A helper function that changes the location of the player."""
    global location
    if direction in worldRooms[location]:
        destination = worldRooms[location][direction]
        # Guard against exits that point at rooms that are not defined yet
        # (e.g. 'Flimsy Branch', 'Bedroom', 'Roof'), which would otherwise
        # crash displayLocation() with a KeyError.
        if destination not in worldRooms:
            print('Something stops you from going that way.')
            return
        print('You move to the %s.' % direction)
        location = destination
        displayLocation(location)
    else:
        print('You cannot move in that direction.')


def getAllDescWords(itemList):
    """Returns a list of "description words" for each item named in itemList."""
    itemList = list(set(itemList))  # make itemList unique
    descWords = []
    for item in itemList:
        descWords.extend(worldItems[item][DESCWORDS])
    return list(set(descWords))


def getAllFirstDescWords(itemList):
    """Returns a list of the first "description word" in the list of
    description words for each item named in itemList."""
    itemList = list(set(itemList))  # make itemList unique
    descWords = []
    for item in itemList:
        descWords.append(worldItems[item][DESCWORDS][0])
    return list(set(descWords))


def getFirstItemMatchingDesc(desc, itemList):
    itemList = list(set(itemList))  # make itemList unique
    for item in itemList:
        if desc in worldItems[item][DESCWORDS]:
            return item
    return None


def getAllItemsMatchingDesc(desc, itemList):
    itemList = list(set(itemList))  # make itemList unique
    matchingItems = []
    for item in itemList:
        if desc in worldItems[item][DESCWORDS]:
            matchingItems.append(item)
    return matchingItems
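# Usage sketch for the helper functions above (interpreter session against the
# starting world state; list order varies because of the set() calls):
#
#   >>> getAllFirstDescWords(['Old Key', 'Sack'])
#   ['old', 'Sack']                                   # first desc word of each
#   >>> getFirstItemMatchingDesc('key', ['Old Key', 'Sack'])
#   'Old Key'
#   >>> getFirstItemMatchingDesc('gun', ['Old Key', 'Sack'])  # no match -> None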
class TextAdventureCmd(cmd.Cmd):
    prompt = '\n> '

    # The default() method is called when none of the other do_*() command methods match.
    def default(self, arg):
        print('I do not understand that command. Type ' + bcolors.start + '"help"' + bcolors.end + ' for a list of commands.')

    # A very simple "quit" command to terminate the program:
    def do_quit(self, arg):
        """Quit the game."""
        return True  # this exits the Cmd application loop in TextAdventureCmd.cmdloop()

    def help_combat(self):
        print('Combat is not implemented in this program.')

    # These direction commands have a long (i.e. north) and short (i.e. n) form.
    # Since the code is basically the same, I put it in the moveDirection()
    # function.
    def do_north(self, arg):
        """Go to the area to the north, if possible."""
        moveDirection('north')

    def do_south(self, arg):
        """Go to the area to the south, if possible."""
        moveDirection('south')

    def do_east(self, arg):
        """Go to the area to the east, if possible."""
        moveDirection('east')

    def do_west(self, arg):
        """Go to the area to the west, if possible."""
        moveDirection('west')

    def do_up(self, arg):
        """Go to the area upwards, if possible."""
        moveDirection('up')

    def do_down(self, arg):
        """Go to the area downwards, if possible."""
        moveDirection('down')

    # Since the code is the exact same, we can just copy the
    # methods with shortened names:
    do_n = do_north
    do_s = do_south
    do_e = do_east
    do_w = do_west
    do_u = do_up
    do_d = do_down

    def do_exits(self, arg):
        """Toggle showing full exit descriptions or brief exit descriptions."""
        global showFullExits
        showFullExits = not showFullExits
        if showFullExits:
            print('Showing full exit descriptions.')
        else:
            print('Showing brief exit descriptions.')

    def do_inventory(self, arg):
        """Display a list of the items in your possession."""
        if len(inventory) == 0:
            print('Inventory:\n  (nothing)')
            return

        # first get a count of each distinct item in the inventory
        itemCount = {}
        for item in inventory:
            if item in itemCount.keys():
                itemCount[item] += 1
            else:
                itemCount[item] = 1

        # get a list of inventory items with duplicates removed:
        print('Inventory:')
        for item in set(inventory):
            if itemCount[item] > 1:
                print('  %s (%s)' % (item, itemCount[item]))
            else:
                print('  ' + item)

    do_inv = do_inventory
    do_i = do_inventory

    def do_take(self, arg):
        """take <item> - Take an item on the ground."""
        # put this value in a more suitably named variable
        itemToTake = arg.lower()

        if itemToTake == '':
            print('Take what? Type "look" to see the items on the ground here.')
            return

        # something funny
        if itemToTake == 'chest':
            print(bcolors.start + "Your feeble arms buckle under the weight of the enormous chest, nice try you thieving git." + bcolors.end)
            return

        cantTake = False

        # get the item name that the player's command describes
        for item in getAllItemsMatchingDesc(itemToTake, worldRooms[location][GROUND]):
            if worldItems[item].get(TAKEABLE, True) == False:
                cantTake = True
                continue  # there may be other items named this that you can take, so we continue checking
            print("Taken.")
            worldRooms[location][GROUND].remove(item)  # remove from the ground
            inventory.append(item)  # add to inventory
            return

        if cantTake:
            print('You cannot take "%s".' % (itemToTake))
        else:
            print('That is not in or around the area, maybe it was your imagination?')
    def do_use(self, arg):
        """use <item> - Use an item in your inventory."""
        itemToUse = arg.lower()

        if itemToUse == '':
            print('Use what? Type "inv" to see the items in your inventory.')
            return

        cantUse = False

        # look up the item the player describes
        invDescWords = getAllDescWords(inventory)

        if itemToUse not in invDescWords:
            print('You do not have that item to use.')
            return

        for item in getAllItemsMatchingDesc(itemToUse, inventory):
            if worldItems[item].get(USEABLE, True) == False:
                cantUse = True
                continue
            # Fall back to a generic message for usable items with no
            # USEDESCTRUE entry (e.g. the old key), which previously raised a
            # KeyError here.
            print(worldItems[item].get(USEDESCTRUE, 'You use %s, but nothing obvious happens.' % (worldItems[item][SHORTDESC])))
            return

        if cantUse:
            print('You cannot use "%s".' % (itemToUse))

    def do_drop(self, arg):
        """drop <item> - Drop an item from your inventory onto the ground."""
        # put this value in a more suitably named variable
        itemToDrop = arg.lower()

        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)

        # find out if the player doesn't have that item
        if itemToDrop not in invDescWords:
            print('You do not have "%s" in your inventory.' % (itemToDrop))
            return

        # get the item name that the player's command describes
        item = getFirstItemMatchingDesc(itemToDrop, inventory)
        if item != None:
            print('You drop %s.' % (worldItems[item][SHORTDESC]))
            inventory.remove(item)  # remove from inventory
            worldRooms[location][GROUND].append(item)  # add to the ground

    # put items in an item container
    def do_put(self, arg):
        """put <item> - Puts an item into a container in the room or in your inventory."""
        itemToStore = arg.lower()

        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)

        # Nice little easter egg :)
        if itemToStore == 'troll in bag':
            print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
            return

        # find out if the player doesn't have that item
        if itemToStore not in invDescWords:
            print('You want to put "%s" in what?!' % (itemToStore))
            return

        # get the item name that the player's command describes
        item = getFirstItemMatchingDesc(itemToStore, inventory)
        if item == None:
            return

        # Look for a container (an item that defines ITEMINV) on the ground or
        # in the inventory. A minimal reading of the "items within items"
        # to-do: the original appended to worldRooms[location][ITEMINV], which
        # no room defines, so this command always raised a KeyError.
        container = None
        for candidate in list(worldRooms[location][GROUND]) + inventory:
            if candidate != item and ITEMINV in worldItems[candidate]:
                container = candidate
                break

        if container == None:
            print('There is no container here to put that in.')
            return

        print('You put %s in %s.' % (worldItems[item][SHORTDESC], worldItems[container][SHORTDESC]))
        inventory.remove(item)  # remove from inventory
        worldItems[container][ITEMINV].append(item)  # add to the container

    def do_store(self, arg):
        """store <item> - Stores an item in a safe place, assuming that the room has a storage area."""
        # put this value in a more suitably named variable
        itemToStore = arg.lower()

        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)

        # Nice little easter egg :)
        if itemToStore == 'troll in bag':
            print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
            return

        # find out if the player doesn't have that item
        if itemToStore not in invDescWords:
            print('%s does not exist in your inventory, the ground, Africa or your pockets, what a shame.' % (itemToStore))
            return

        # Check that this room has a storage area *before* touching the
        # inventory. Previously a KeyError after inventory.remove() could lose
        # the item, and the stray string returned from the except block was
        # treated as a stop flag by cmd and ended the game.
        if STORAGE not in worldRooms[location]:
            print("Don't even think about it buster brown.")
            return

        # get the item name that the player's command describes
        item = getFirstItemMatchingDesc(itemToStore, inventory)
        if item != None:
            print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
            inventory.remove(item)  # remove from inventory
            worldRooms[location][STORAGE].append(item)  # add to the storage area
    def complete_take(self, text, line, begidx, endidx):
        possibleItems = []
        text = text.lower()

        # if the user has only typed "take" but no item name:
        if not text:
            return getAllFirstDescWords(worldRooms[location][GROUND])

        # otherwise, get a list of all "description words" for ground items matching the command text so far:
        for item in list(set(worldRooms[location][GROUND])):
            for descWord in worldItems[item][DESCWORDS]:
                if descWord.startswith(text) and worldItems[item].get(TAKEABLE, True):
                    possibleItems.append(descWord)

        return list(set(possibleItems))  # make list unique

    def complete_drop(self, text, line, begidx, endidx):
        possibleItems = []
        itemToDrop = text.lower()

        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)

        for descWord in invDescWords:
            if line.startswith('drop %s' % (descWord)):
                return []  # command is complete

        # if the user has only typed "drop" but no item name:
        if itemToDrop == '':
            return getAllFirstDescWords(inventory)

        # otherwise, get a list of all "description words" for inventory items matching the command text so far:
        for descWord in invDescWords:
            if descWord.startswith(text):
                possibleItems.append(descWord)

        return list(set(possibleItems))  # make list unique
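    # The complete_*() methods above and below hook into cmd.Cmd's readline
    # tab-completion: cmd calls them with the partial word being completed
    # ("text") plus the whole input line, and expects back a list of candidate
    # completions for that word.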
    def do_look(self, arg):
        """Look at an item, direction, or the area:
"look" - display the current area's description
"look <direction>" - display the description of the area in that direction
"look exits" - display the description of all adjacent areas
"look <item>" - display the description of an item on the ground or in your inventory"""

        lookingAt = arg.lower()
        if lookingAt == '':
            # "look" will re-print the area description
            displayLocation(location)
            return

        if lookingAt == 'exits':
            for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
                if direction in worldRooms[location]:
                    print('%s: %s' % (direction.title(), worldRooms[location][direction]))
            return

        if lookingAt in ('north', 'west', 'east', 'south', 'up', 'down', 'n', 'w', 'e', 's', 'u', 'd'):
            if lookingAt.startswith('n') and NORTH in worldRooms[location]:
                print(worldRooms[location][NORTH])
            elif lookingAt.startswith('w') and WEST in worldRooms[location]:
                print(worldRooms[location][WEST])
            elif lookingAt.startswith('e') and EAST in worldRooms[location]:
                print(worldRooms[location][EAST])
            elif lookingAt.startswith('s') and SOUTH in worldRooms[location]:
                print(worldRooms[location][SOUTH])
            elif lookingAt.startswith('u') and UP in worldRooms[location]:
                print(worldRooms[location][UP])
            elif lookingAt.startswith('d') and DOWN in worldRooms[location]:
                print(worldRooms[location][DOWN])
            else:
                print('There is nothing in that direction.')
            return

        # see if the item being looked at is on the ground at this location
        item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND])
        if item != None:
            print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
            return

        # see if the item being looked at is in the inventory
        item = getFirstItemMatchingDesc(lookingAt, inventory)
        if item != None:
            print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
            return

        print('You do not see that nearby.')

    def complete_look(self, text, line, begidx, endidx):
        possibleItems = []
        lookingAt = text.lower()

        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)
        groundDescWords = getAllDescWords(worldRooms[location][GROUND])

        for descWord in invDescWords + groundDescWords + [NORTH, SOUTH, EAST, WEST, UP, DOWN]:
            if line.startswith('look %s' % (descWord)):
                return []  # command is complete

        # If the user has only typed "look" but no item name, show all items on
        # the ground and directions. (The original indexed
        # worldRooms[location][GROUND][STORAGE] -- a list indexed with a string
        # -- which raised a TypeError.)
        if lookingAt == '':
            possibleItems.extend(getAllFirstDescWords(worldRooms[location][GROUND]))
            for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
                if direction in worldRooms[location]:
                    possibleItems.append(direction)
            return list(set(possibleItems))  # make list unique

        # otherwise, get a list of all "description words" for ground items matching the command text so far:
        for descWord in groundDescWords:
            if descWord.startswith(lookingAt):
                possibleItems.append(descWord)

        # check for matching directions
        for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
            if direction.startswith(lookingAt):
                possibleItems.append(direction)

        # get a list of all "description words" for inventory items matching the command text so far:
        for descWord in invDescWords:
            if descWord.startswith(lookingAt):
                possibleItems.append(descWord)

        return list(set(possibleItems))  # make list unique

    # Extra ways of writing commands
    do_read = do_look
    do_l = do_look

    def do_eat(self, arg):
        """eat <item> - Eat an item in your inventory."""
        itemToEat = arg.lower()

        if itemToEat == '':
            print("Eat what? Type \"inventory\" or \"inv\" to see what's in your inventory.")
            return

        cantEat = False

        for item in getAllItemsMatchingDesc(itemToEat, inventory):
            if worldItems[item].get(EDIBLE, False) == False:
                cantEat = True
                continue  # there may be other items named this that you can eat, so we continue checking
            # NOTE - If you wanted to implement hunger levels, here is where
            # you would add code that changes the player's hunger level.
            print('You eat %s, may your bowels forever question your terrible choices.' % (worldItems[item][SHORTDESC]))
            inventory.remove(item)
            return

        if cantEat:
            print("I don't think the \"%s\" would like you to do that..." % (itemToEat))
        else:
            print('You do not have "%s". Type "inventory" or "inv" to see what is in your inventory.' % (itemToEat))

    def complete_eat(self, text, line, begidx, endidx):
        itemToEat = text.lower()
        possibleItems = []

        # if the user has only typed "eat" but no item name:
        if itemToEat == '':
            return getAllFirstDescWords(inventory)

        # otherwise, get a list of all "description words" for edible inventory items matching the command text so far:
        for item in list(set(inventory)):
            for descWord in worldItems[item][DESCWORDS]:
                if descWord.startswith(text) and worldItems[item].get(EDIBLE, False):
                    possibleItems.append(descWord)

        return list(set(possibleItems))  # make list unique

    do_exit = do_quit  # another way of exiting the game with a different word
    def do_clear(self, arg):
        """clear - Clear all text from the screen."""
        # platform.system is a function; the original compared the function
        # object itself to "Windows", so "clear" always ran, even on Windows.
        if platform.system() == "Windows":
            os.system("cls")
        else:
            os.system("clear")

    # :)
    def do_hacf(self, arg):
        """hacf - ### ?????? ###"""
        if 'Troll' in worldRooms['Main Hall'][STORAGE]:
            print(bcolors.start + 'A patch will be released soon, you have been compensated, please do not take us to court.' + bcolors.end)
        else:
            fake_error = ValueError('ERROR : The spacetime continuum has been breached, all you know is a lie.')
            print(fake_error)
            print(bcolors.start + "The developer is sorry for this bug, we have deposited something nice for you in the chest." + bcolors.end)
            item = 'Troll'
            worldRooms['Main Hall'][STORAGE].append(item)
            worldRooms['Main Hall'][GROUND].remove(item)


if __name__ == '__main__':
    print(bcolors.start + ' Escape the house ' + bcolors.end)
    print('====================')
    print("")
    print('[Type "help" for commands.]')
    print("")
    displayLocation(location)
    TextAdventureCmd().cmdloop()

    # Win condition. The original compared len(STORAGE) -- the constant string
    # 'storage', whose length is always 7 -- so the winning branch could never
    # trigger. Counting the Main Hall chest instead; the threshold of 8 is an
    # assumption, roughly the number of storable items in the game.
    if len(worldRooms['Main Hall'][STORAGE]) >= 8:
        print("You hear a soft click, could it be a trap?, Gnomes?!, *GASP* swamp folk... the worst, no it's just the door opening, you are free to leave.")
        print("Congratulations are in order, you found everything, however it turns out you could have just opened the door, it was never locked at all... good job though..")
    else:
        print('Looks like you are going to be stuck here for a very, very long time.')
description displayLocation(location, default) return if lookingAt == 'exits': for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN): if direction in worldRooms[location]: print('%s: %s' % (direction.title(), worldRooms[location][direction])) return if lookingAt in ('north', 'west', 'east', 'south', 'up', 'down', 'n', 'w', 'e', 's', 'u', 'd'): if lookingAt.startswith('n') and NORTH in worldRooms[location]: print(worldRooms[location][NORTH]) elif lookingAt.startswith('w') and WEST in worldRooms[location]: print(worldRooms[location][WEST]) elif lookingAt.startswith('e') and EAST in worldRooms[location]: print(worldRooms[location][EAST]) elif lookingAt.startswith('s') and SOUTH in worldRooms[location]: print(worldRooms[location][SOUTH]) elif lookingAt.startswith('u') and UP in worldRooms[location]: print(worldRooms[location][UP]) elif lookingAt.startswith('d') and DOWN in worldRooms[location]: print(worldRooms[location][DOWN]) else: print('There is nothing in that direction.') return # see if the item being looked at is on the ground at this location or in storage. #item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND][STORAGE]) item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND]) if item != None: print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH))) return # see if the item being looked at is in the inventory item = getFirstItemMatchingDesc(lookingAt, inventory) if item != None: print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH))) return print('You do not see that nearby.') def complete_look(self, text, line, begidx, endidx): possibleItems = [] lookingAt = text.lower() # get a list of all "description words" for each item in the inventory invDescWords = getAllDescWords(inventory) groundDescWords = getAllDescWords(worldRooms[location][GROUND]) for descWord in invDescWords + groundDescWords + [NORTH, SOUTH, EAST, WEST, UP, DOWN]: if line.startswith('look %s' % (descWord)): return [] # command is complete # if the user has only typed "look" but no item name, show all items on ground and directions: if lookingAt == '': possibleItems.extend(getAllFirstDescWords(worldRooms[location][GROUND][STORAGE])) for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN): if direction in worldRooms[location]: possibleItems.append(direction) return list(set(possibleItems)) # make list unique # otherwise, get a list of all "description words" for ground items matching the command text so far: for descWord in groundDescWords: if descWord.startswith(lookingAt): possibleItems.append(descWord) # check for matching directions for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN): if direction.startswith(lookingAt): possibleItems.append(direction) # get a list of all "description words" for inventory items matching the command text so far: for descWord in invDescWords: if descWord.startswith(lookingAt): possibleItems.append(descWord) return list(set(possibleItems)) # make list unique #arg = arg.lower() # Extra ways of writing commands do_read = do_look do_l = do_look def do_eat(self, arg): """"eat <item>" - eat an item in your inventory.""" itemToEat = arg.lower() if itemToEat == '': print('Eat what? 
Type "inventory" or "inv" to see whats in your inventory.') return cantEat = False for item in getAllItemsMatchingDesc(itemToEat, inventory): if worldItems[item].get(EDIBLE, False) == False: cantEat = True continue # there may be other items named this that you can eat, so we continue checking # NOTE - If you wanted to implement hunger levels, here is where # you would add code that changes the player's hunger level. print('You eat %s may your bowls forever question your terrible choices.' % (worldItems[item][SHORTDESC])) inventory.remove(item) return if cantEat: print('I dont think the "%s" would like you to do that...' % (worldItems[item][SHORTDESC])) else: print('You do not have "%s". Type "inventory" or "inv" to see what in your inventory.' % (itemToEat)) def complete_eat(self, text, line, begidx, endidx): itemToEat = text.lower() possibleItems = [] # if the user has only typed "eat" but no item name: if itemToEat == '': return getAllFirstDescWords(inventory) # otherwise, get a list of all "description words" for edible inventory items matching the command text so far: for item in list(set(inventory)): for descWord in worldItems[item][DESCWORDS]: if descWord.startswith(text) and worldItems[item].get(EDIBLE, False): possibleItems.append(descWord) return list(set(possibleItems)) # make list unique do_exit = do_quit # another way of exiting the game with a differnt word def do_clear(self, arg): """"clear" - Clear all text from the screen.""" if platform.system == "Windows": os.system("cls") else: os.system("clear") # :) def do_hacf(self, arg): if 'Troll' in worldRooms['Main Hall'][STORAGE]: print(bcolors.start + 'A patch will be released soon, you have compinsated, please do not take us to court.' + bcolors.end) else: """hacf - ### ?????? ###""" fake_error = ValueError('ERROR : The spacetime continum has been breached, all you know is a lie.') print(fake_error) print(bcolors.start + "The developer is sorry for this bug, we have deposited something nice for you in the chest." + bcolors.end) #worldItems[item][DESCWORDS] item = 'Troll' worldRooms['Main Hall'][STORAGE].append(item) worldRooms['Main Hall'][GROUND].remove(item) if __name__ == '__main__': print(bcolors.start + ' Escape the house ' + bcolors.end) print('====================') print("") print('[Type "help" for commands.]') print("") displayLocation(location, default) TextAdventureCmd().cmdloop() if len(STORAGE) > 15: print("You hear a soft click, could it be a trap?, Gnomes?!, *GASP* swamp folk... the worst, no its just the door opening, you are free to leave.") print("Congratulations are in order, you found evenything however turns out you could have just opened the door it was never locked at all... good job though..") print('Looks like you are going to be stuck here for a very, very long time.')
print('Showing brief exit descriptions.')
random_line_split
main.py
#!/usr/bin/python
# __ _ _
# /__\__ ___ __ _ _ __ ___ | |_| |__ ___ /\ /\___ _ _ ___ ___
# /_\/ __|/ __/ _` | '_ \ / _ \ | __| '_ \ / _ \ / /_/ / _ \| | | / __|/ _ \
# //__\__ \ (_| (_| | |_) | __/ | |_| | | | __/ / __ / (_) | |_| \__ \ __/
# \__/|___/\___\__,_| .__/ \___| \__|_| |_|\___| \/ /_/ \___/ \__,_|___/\___|
# |_|
# Modified By Sam Scotford
# Credits to Al Sweigart for the bulk of the source code
#  - https://inventwithpython.com/blog/2014/12/11/making-a-text-adventure-game-with-the-cmd-and-textwrap-python-modules/
#
# To-Do:
# Have a lighting system like in Zork, where the player must have a torch to enter certain rooms
# Have some combat
# Items within items needs fixing / implementing
# Fix bug where trying to store items outside of the main hall produces an error
# UI improvements

import _random
import datetime
#import player_class  # Player class external
import os
import cmd
import textwrap
import colorama
import ctypes
import platform

# Output colors
colorama.init()

# Set window title
#ctypes.windll.kernel32.SetConsoleTitleA("Escape The House v5.0.9")

class bcolors:
    start = "\033[1;31m"
    end = "\033[0;0m"

# Constant variables
DEFAULT = ''
DESC = 'desc'
NORTH = 'north'
SOUTH = 'south'
EAST = 'east'
WEST = 'west'
UP = 'up'
DOWN = 'down'
GROUND = 'ground'
STORAGE = 'storage'
DOOR = 'door'
HIDDEN = 'hidden'
GROUNDDESC = 'grounddesc'
INVDESC = 'invdesc'
SHORTDESC = 'shortdesc'
LONGDESC = 'longdesc'
TAKEABLE = 'takeable'
STORABLE = 'storeable'  # able to be stored in a bag / an item's inventory
STORAGEDESC = 'storagedesc'
EDIBLE = 'edible'
USEABLE = 'usable'
TOGGLE = 'TOGGLE'
USEDESCTRUE = 'usedesctrue'
USEDESCFALSE = 'usedescfalse'
ITEMINV = 'iteminv'
ITEM_INVENTORY = []
DESCWORDS = 'descwords'

SCREEN_WIDTH = 80

location = 'Entrance Hall'  # Start here
inventory = ['Personal ID']
showFullExits = False
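# A room is simply a dict keyed by the constants above. A minimal sketch of the
# schema (hypothetical example room, not part of the game):
#
#   'Cellar': {
#       DESC: 'A damp cellar.',    # shown by displayLocation()
#       UP: 'Kitchen',             # exits map a direction constant to a room name
#       GROUND: ['Old Key'],       # names of items lying on the floor
#       STORAGE: [],               # optional; only rooms with this key can store items
#   },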
# Define rooms here
# NOTE: 'Flimsy Branch', 'Bedroom' and 'Roof' are referenced as exits below but
# never defined, so moving there raises a KeyError.
worldRooms = {
    'Entrance Hall': {
        DESC: "You stand in a hallway with a single door in front of you, the room is dimly lit by a single bulb that flickers occasionally.",
        NORTH: 'Main Hall',
        GROUND: ['Old Key', 'Note', 'Sack'],
    },
    'Main Hall': {
        DESC: 'You stand in the main hall, there is debris everywhere you look, it looks like a bomb went off in here, there is also a small troll statue on the side table.',
        SOUTH: 'Entrance Hall',
        EAST: 'Kitchen',
        WEST: 'Library',
        UP: '2nd Floor',
        GROUND: ['Torch', 'Chest', 'Troll'],
        STORAGE: [],  # Used to store items and win the game
    },
    'Kitchen': {
        DESC: "You stand in what appears to be a large kitchen, pots and pans hang from the walls and though dusty look quite well used.",
        SOUTH: 'Main Hall',
        EAST: 'Garden',
        GROUND: [],  # was '' — an empty string here breaks drop/look, which expect a list
    },
    'Garden': {
        DESC: "You are in an overgrown garden, a pristine lawn-mower lies in the undergrowth somewhat ironically, a tree house can be seen in the distance.",
        WEST: "Kitchen",
        UP: "Treehouse",
        GROUND: [],
    },
    "Treehouse": {
        DESC: "With much effort on your part and that of the rather brittle ladders, you make it to the top, a strong breeze shakes the tree and you begin to feel rather ill.",
        NORTH: "Flimsy Branch",
        DOWN: "Garden",
        GROUND: [],
    },
    'Library': {
        DESC: "You walk into the Library, the smell of dusty books invades your nostrils. There is a vaulted ceiling here and the moonlight shines into the room through stained glass windows, making dancing shapes on the bookcases.",
        EAST: 'Main Hall',
        GROUND: ['Dusty Books', 'Gun'],
    },
    'Library 2nd Floor': {
        DESC: "You pass through a creaky door on your way to the second floor library.",
        EAST: "2nd Floor",
        GROUND: [],
    },
    '2nd Floor': {
        DESC: "You go up the spiraling staircase onto the second floor, the first was blocked by an impassable door, you look down and have sudden vertigo from the height, you turn around.",
        DOWN: 'Main Hall',
        WEST: 'Library 2nd Floor',
        EAST: 'Bedroom',
        UP: 'Attic',
        GROUND: ['Cassette'],
    },
    'Attic': {
        DESC: "Above you is an old wooden hatch with a brass ring, using your feeble legs you give up trying to reach for it and instead grab a hook from beside you and lever the creaky door open, about a million weevels fall from the opening. You climb inside.",
        DOWN: "2nd Floor",
        UP: "Roof",
        GROUND: ['Weevels'],
    },
}
# Define items in the world
worldItems = {
    'Old Key': {
        GROUNDDESC: 'A dull brass key lies on the ground here.',
        SHORTDESC: 'Old key',
        LONGDESC: 'The old key has intricate inscriptions on it, it is covered in dust from years of disuse, it is cool to the touch.',
        TAKEABLE: True,
        EDIBLE: False,
        USEABLE: True,
        DESCWORDS: ['old', 'key'],
        STORAGEDESC: "[The old key] is within the chest, it's like leaving the car keys in the car and regretting ever being born, good job..",
    },
    'Personal ID': {
        GROUNDDESC: 'Your personal identification card lies on the floor.',
        SHORTDESC: 'Your ID card.',
        LONGDESC: 'Through much mental anguish you read your card. You are Henry Burton, your age is 34 and you have no idea of where you are.',
        TAKEABLE: True,
        DESCWORDS: ['id', 'card'],
        EDIBLE: False,
        USEABLE: False,
        STORAGEDESC: '[ID] Not even the blackness of the chest can hide your ugly mug, not even the weevels will go near it.',
    },
    'Note': {
        GROUNDDESC: "A note lies here, it's hard to make out what it says from where you are.",
        SHORTDESC: 'Note',
        LONGDESC: 'It\'s a note and it reads "There was no mailbox outside to leave this in so I just left it here.."',
        TAKEABLE: True,
        EDIBLE: True,
        USEABLE: True,
        USEDESCTRUE: 'You use the note, and wipe your snotty nose with it, now you have a snotty note, gross.',
        DESCWORDS: ['note'],
        STORAGEDESC: '[Note] Not sure why you want this in here, but it is regardless..',
    },
    'Torch': {
        GROUNDDESC: 'A typical torch / flashlight lies on the floor.',
        SHORTDESC: 'Torch / Flashlight.',
        LONGDESC: 'The torch / flashlight emits a soft glow, just enough to light the way.',
        EDIBLE: False,
        USEABLE: True,
        TOGGLE: False,
        USEDESCTRUE: 'You click the top of the torch, a beam illuminates your way, you can now be lost and see what you are doing.',
        USEDESCFALSE: 'You click the top of the torch, you think "Hey, who turned out the lights?", the torch stays on, you bust the switch with your meaty fingers.. good job.',
        DESCWORDS: ['torch', 'flashlight'],
        STORAGEDESC: "[Torch] In the blackness of the chest not even the torch's light escapes.. spooky.",
    },
    'Dusty Books': {
        GROUNDDESC: 'A pile of dusty books lies on the floor near a reading desk.',
        SHORTDESC: 'Dusty Books',
        LONGDESC: "A pile of dusty old books, pages half rotting away, it's hard to make out what is written in them; Hitchhikers Guide to the Galaxy, How to Stew a Ham in 43 Different Ways and various other, written, human detritus.",
        EDIBLE: False,
        USEABLE: False,
        DESCWORDS: ['books', 'book'],
        STORAGEDESC: '[Dusty Books] The books lie at the bottom of the chest looking miserable.',
    },
    'Gun': {
        GROUNDDESC: 'A gun lies on the floor here.',
        SHORTDESC: 'Gun',
        LONGDESC: 'A 32 ACP revolver, it has 5 chambers, one of the cartridges has been fired.',
        EDIBLE: False,
        USEABLE: True,
        DESCWORDS: ['Gun', 'gun', 'revolver'],
        STORAGEDESC: '[Gun] Better the gun be in here than in my hands..',
    },
    'Sack': {
        GROUNDDESC: 'A sack of burlap lies on the floor here.',
        SHORTDESC: 'Sack',
        LONGDESC: "It's an old sack used for storing things in, it smells like onions.",
        EDIBLE: False,
        DESCWORDS: ['Sack', 'bag', 'sack'],
        STORAGEDESC: "[Sack] A container within a container, it's like that terrible movie with Leonardo DiCaprio..",
        # Attempting "Items within Items"
        ITEMINV: ['Lunch'],  # NOTE: 'Lunch' is not defined in worldItems
    },
    'Chest': {
        SHORTDESC: 'A wooden chest',
        GROUNDDESC: 'A wooden chest resides in the far corner of this room with an inscription on it.',
        LONGDESC: 'It\'s an old wooden chest with the inscription "Por viaj malmolaj gajnitaj eroj.", the language begins with an Esp... you know that much.',
        EDIBLE: False,
        TAKEABLE: False,
        USEABLE: False,
        DESCWORDS: ['Chest', 'Box', 'Crate', 'chest', 'box', 'crate'],
    },
    'Troll': {
        SHORTDESC: 'A troll figure',
        GROUNDDESC: 'A troll is somewhere around here.',
        LONGDESC: 'A small troll figure carved from wood, you turn it over in your hands, an inscription on the base reads "RIP Inbag the Troll.", a disembodied scottish voice tells you to not put it in your bag.',
        EDIBLE: False,
        USEABLE: False,
        TAKEABLE: False,
        DESCWORDS: ['Troll', 'troll', 'figure', 'statue'],
        STORAGEDESC: "[Troll] The troll lies disgruntled in the chest, it's dark in there, it might be eaten by a Grue.",
    },
    'Cassette': {
        SHORTDESC: 'A cassette tape',
        GROUNDDESC: 'A cassette tape lies here on the floor, someone must have "dropped the bass".',
        LONGDESC: 'You turn the cassette tape over in your hands, the label reads "Best of the 60s", it possibly contains Fleetwood Mac and thus must be destroyed immediately.',
        EDIBLE: False,
        USEABLE: False,
        TAKEABLE: True,
        DESCWORDS: ['tape', 'cassette', 'music tape', 'music'],
        STORAGEDESC: '[Tape] A tape lies in the bottom of the chest, we would have preferred you to burn it but this choice is yours.',
    },
    'Weevels': {
        SHORTDESC: 'A pile of dead weevels',
        GROUNDDESC: 'A pile of rotting weevels lay on the ground.',
        LONGDESC: "It's a pile of fucking rotting weevels.",
        EDIBLE: True,
        USEABLE: False,
        TAKEABLE: True,
        DESCWORDS: ['weevels', 'pile of weevels', 'rotting weevels'],
        STORAGEDESC: '[Weevels] A pile of rotting weevel husks lie at the bottom of the chest.',
    },
}
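# Adding a new item is just another entry in worldItems. A minimal sketch
# (hypothetical item, not part of the game):
#
#   'Apple': {
#       GROUNDDESC: 'A bruised apple lies here.',  # shown in room listings
#       SHORTDESC: 'Apple',
#       LONGDESC: 'A slightly bruised but edible apple.',
#       EDIBLE: True,                              # do_eat checks this flag
#       TAKEABLE: True,                            # do_take defaults this to True when missing
#       DESCWORDS: ['apple', 'fruit'],             # words the player may type for it
#   },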
global default
default = ""

def displayLocation(loc, default):
    """A helper function for displaying an area's description and exits."""
    # Print the room name.
    print(bcolors.start + loc + bcolors.end)
    print('=' * len(loc))
    # Print the room's description (using textwrap.wrap())
    print('\n'.join(textwrap.wrap(worldRooms[loc][DESC], SCREEN_WIDTH)))
    # Print all the items on the ground.
    if len(worldRooms[loc][GROUND]) > 0:
        print("")
        for item in worldRooms[loc][GROUND]:
            print(worldItems[item][GROUNDDESC])
    try:
        # Check storage exists
        if len(worldRooms[loc][STORAGE]) > 0:
            print(bcolors.start + "The treasures you have accrued thus far are (Chest):" + bcolors.end)
            for item in worldRooms[loc][STORAGE]:
                print(worldItems[item][STORAGEDESC])
    except KeyError:
        pass  # This room has no storage; fall through so the exits still get printed.
    # Print all the exits.
    exits = []
    for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
        if direction in worldRooms[loc].keys():
            exits.append(direction.title())
    print("")
    if showFullExits:
        for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
            if direction in worldRooms[location]:
                print('%s: %s' % (direction.title(), worldRooms[location][direction]))
    else:
        print('Exits: %s' % ' '.join(exits))

def moveDirection(direction):
    """A helper function that changes the location of the player."""
    global location
    if direction in worldRooms[location]:
        print('You move to the %s.' % direction)
        location = worldRooms[location][direction]
        displayLocation(location, default)
    else:
        print('You cannot move in that direction.')

def getAllDescWords(itemList):
    """Returns a list of "description words" for each item named in itemList."""
    itemList = list(set(itemList))  # make itemList unique
    descWords = []
    for item in itemList:
        descWords.extend(worldItems[item][DESCWORDS])
    return list(set(descWords))

def getAllFirstDescWords(itemList):
    """Returns a list of the first "description word" in the list of
    description words for each item named in itemList."""
    itemList = list(set(itemList))  # make itemList unique
    descWords = []
    for item in itemList:
        descWords.append(worldItems[item][DESCWORDS][0])
    return list(set(descWords))

def getFirstItemMatchingDesc(desc, itemList):
    itemList = list(set(itemList))  # make itemList unique
    for item in itemList:
        if desc in worldItems[item][DESCWORDS]:
            return item
    return None

def getAllItemsMatchingDesc(desc, itemList):
    itemList = list(set(itemList))  # make itemList unique
    matchingItems = []
    for item in itemList:
        if desc in worldItems[item][DESCWORDS]:
            matchingItems.append(item)
    return matchingItems

class TextAdventureCmd(cmd.Cmd):
    prompt = '\n> '

    # The default() method is called when none of the other do_*() command methods match.
    def default(self, arg):
        print('I do not understand that command. Type ' + bcolors.start + '"help"' + bcolors.end + ' for a list of commands.')

    # A very simple "quit" command to terminate the program:
    def do_quit(self, arg):
        """Quit the game."""
        return True  # this exits the Cmd application loop in TextAdventureCmd.cmdloop()

    def help_combat(self):
        print('Combat is not implemented in this program.')

    # These direction commands have a long (i.e. north) and short (i.e. n) form.
    # Since the code is basically the same, I put it in the moveDirection()
    # function.
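    # cmd.Cmd dispatches on method names: typing "foo" runs do_foo(arg), "help foo"
    # prints do_foo's docstring (or runs help_foo()), and tab-completing "foo <text>"
    # calls complete_foo(text, line, begidx, endidx). A minimal hypothetical sketch
    # of the pattern (left commented out so it does not add a command to the game):
    #
    #   def do_sing(self, arg):
    #       """sing - Sing a short song."""
    #       print('La la la.')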
    def do_north(self, arg):
        """Go to the area to the north, if possible."""
        moveDirection('north')

    def do_south(self, arg):
        """Go to the area to the south, if possible."""
        moveDirection('south')

    def do_east(self, arg):
        """Go to the area to the east, if possible."""
        moveDirection('east')

    def do_west(self, arg):
        """Go to the area to the west, if possible."""
        moveDirection('west')

    def do_up(self, arg):
        """Go to the area upwards, if possible."""
        moveDirection('up')

    def do_down(self, arg):
        """Go to the area downwards, if possible."""
        moveDirection('down')

    # Since the code is the exact same, we can just copy the
    # methods with shortened names:
    do_n = do_north
    do_s = do_south
    do_e = do_east
    do_w = do_west
    do_u = do_up
    do_d = do_down

    def do_exits(self, arg):
        """Toggle showing full exit descriptions or brief exit descriptions."""
        global showFullExits
        showFullExits = not showFullExits
        if showFullExits:
            print('Showing full exit descriptions.')
        else:
            print('Showing brief exit descriptions.')

    def do_inventory(self, arg):
        """Display a list of the items in your possession."""
        if len(inventory) == 0:
            print('Inventory:\n (nothing)')
            return
        # first get a count of each distinct item in the inventory
        itemCount = {}
        for item in inventory:
            if item in itemCount.keys():
                itemCount[item] += 1
            else:
                itemCount[item] = 1
        # get a list of inventory items with duplicates removed:
        print('Inventory:')
        for item in set(inventory):
            if itemCount[item] > 1:
                print(' %s (%s)' % (item, itemCount[item]))
            else:
                print(' ' + item)

    do_inv = do_inventory
    do_i = do_inventory

    def do_take(self, arg):
        """take <item> - Take an item on the ground."""
        # put this value in a more suitably named variable
        itemToTake = arg.lower()
        if itemToTake == '':
            print('Take what? Type "look" to see the items on the ground here.')
            return
        cantTake = False
        # get the item name that the player's command describes
        for item in getAllItemsMatchingDesc(itemToTake, worldRooms[location][GROUND]):
            if worldItems[item].get(TAKEABLE, True) == False:
                cantTake = True
                continue  # there may be other items named this that you can take, so we continue checking
            print("Taken.")
            worldRooms[location][GROUND].remove(item)  # remove from the ground
            inventory.append(item)  # add to inventory
            return
        # something funny
        if itemToTake == 'chest':
            print(bcolors.start + "Your feeble arms buckle under the weight of the enormous chest, nice try you thieving git." + bcolors.end)
            return
        if cantTake:
            print('You cannot take "%s".' % (itemToTake))
        else:
            print('That is not in or around the area, maybe it was your imagination?')

    def do_use(self, arg):
        """use <item> - Use an item in your inventory."""
        itemToUse = arg.lower()
        if itemToUse == '':
            print('Use what? Type "inv" to see the items in your inventory.')
            return
        cantUse = False
        # look up the item the player describes
        invDescWords = getAllDescWords(inventory)
        if itemToUse not in invDescWords:
            print('You do not have that item to use.')
            return
        for item in getAllItemsMatchingDesc(itemToUse, inventory):
            if worldItems[item].get(USEABLE, True) == False:
                cantUse = True
                continue
            print('%s' % (worldItems[item][USEDESCTRUE]))
            #print('You use %s' % (worldItems[item][SHORTDESC]))
            #inventory.remove(item)
            return
        if cantUse:
            print('You cannot use "%s".' % (itemToUse))
        else:
            print('You do not have that item to use.')
    def do_drop(self, arg):
        """drop <item> - Drop an item from your inventory onto the ground."""
        # put this value in a more suitably named variable
        itemToDrop = arg.lower()
        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)
        # find out if the player doesn't have that item
        if itemToDrop not in invDescWords:
            print('You do not have "%s" in your inventory.' % (itemToDrop))
            return
        # get the item name that the player's command describes
        item = getFirstItemMatchingDesc(itemToDrop, inventory)
        if item != None:
            print('You drop %s.' % (worldItems[item][SHORTDESC]))
            inventory.remove(item)  # remove from inventory
            worldRooms[location][GROUND].append(item)  # add to the ground

    # put items in an item container
    def do_put(self, arg):
        """put <item> in <item> - Puts an item in a container."""
        # put this value in a more suitably named variable
        itemToStore = arg.lower()
        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)
        # Nice little easter egg :)
        if itemToStore == 'troll in bag':
            print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
            return
        # find out if the player doesn't have that item
        if itemToStore not in invDescWords:
            print('You want to put "%s" in what?!' % (itemToStore))
            return
        # get the item name that the player's command describes
        item = getFirstItemMatchingDesc(itemToStore, inventory)
        if item != None:
            # NOTE: "Items within Items" is still a to-do; rooms never carry an
            # ITEMINV key, so guard the lookup instead of crashing with a KeyError.
            if ITEMINV not in worldRooms[location]:
                print('There is no container here to put that in.')
                return
            print('You put %s in the container.' % (worldItems[item][SHORTDESC]))
            inventory.remove(item)  # remove from inventory
            worldRooms[location][ITEMINV].append(item)  # add to the container

    def do_store(self, arg):
        """store <item> - Stores an item in a safe place, assuming that the room has a storage area."""
        # put this value in a more suitably named variable
        itemToStore = arg.lower()
        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)
        # Nice little easter egg :)
        if itemToStore == 'troll in bag':
            print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
            return
        # find out if the player doesn't have that item
        if itemToStore not in invDescWords:
            print('%s does not exist in your inventory, the ground, africa or your pockets, what a shame.' % (itemToStore))
            return
        # Check that this room actually has a STORAGE area before storing.
        # (Returning a truthy value from a do_*() method would quit the game,
        # so print the refusal instead of returning it.)
        if STORAGE not in worldRooms[location]:
            print("Don't even think about it buster brown.")
            return
        # get the item name that the player's command describes
        item = getFirstItemMatchingDesc(itemToStore, inventory)
        if item != None:
            print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
            inventory.remove(item)  # remove from inventory
            worldRooms[location][STORAGE].append(item)  # add to storage
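    # The complete_*() methods below hook into readline tab-completion: cmd
    # passes the word being completed (text), the whole input line (line), and
    # the start/end indices of text within the line (begidx, endidx), and
    # expects back a list of candidate completions for the current command.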
    def complete_take(self, text, line, begidx, endidx):
        possibleItems = []
        text = text.lower()
        # if the user has only typed "take" but no item name:
        if not text:
            return getAllFirstDescWords(worldRooms[location][GROUND])
        # otherwise, get a list of all "description words" for ground items matching the command text so far:
        for item in list(set(worldRooms[location][GROUND])):
            for descWord in worldItems[item][DESCWORDS]:
                if descWord.startswith(text) and worldItems[item].get(TAKEABLE, True):
                    possibleItems.append(descWord)
        return list(set(possibleItems))  # make list unique

    def complete_drop(self, text, line, begidx, endidx):
        possibleItems = []
        itemToDrop = text.lower()
        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)
        for descWord in invDescWords:
            if line.startswith('drop %s' % (descWord)):
                return []  # command is complete
        # if the user has only typed "drop" but no item name:
        if itemToDrop == '':
            return getAllFirstDescWords(inventory)
        # otherwise, get a list of all "description words" for inventory items matching the command text so far:
        for descWord in invDescWords:
            if descWord.startswith(text):
                possibleItems.append(descWord)
        return list(set(possibleItems))  # make list unique

    def do_look(self, arg):
        """Look at an item, direction, or the area:
"look" - display the current area's description
"look <direction>" - display the description of the area in that direction
"look exits" - display the description of all adjacent areas
"look <item>" - display the description of an item on the ground, in storage or in your inventory"""
        lookingAt = arg.lower()
        if lookingAt == '':
            # "look" will re-print the area description
            displayLocation(location, default)
            return
        if lookingAt == 'exits':
            for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
                if direction in worldRooms[location]:
                    print('%s: %s' % (direction.title(), worldRooms[location][direction]))
            return
        if lookingAt in ('north', 'west', 'east', 'south', 'up', 'down', 'n', 'w', 'e', 's', 'u', 'd'):
            if lookingAt.startswith('n') and NORTH in worldRooms[location]:
                print(worldRooms[location][NORTH])
            elif lookingAt.startswith('w') and WEST in worldRooms[location]:
                print(worldRooms[location][WEST])
            elif lookingAt.startswith('e') and EAST in worldRooms[location]:
                print(worldRooms[location][EAST])
            elif lookingAt.startswith('s') and SOUTH in worldRooms[location]:
                print(worldRooms[location][SOUTH])
            elif lookingAt.startswith('u') and UP in worldRooms[location]:
                print(worldRooms[location][UP])
            elif lookingAt.startswith('d') and DOWN in worldRooms[location]:
                print(worldRooms[location][DOWN])
            else:
                print('There is nothing in that direction.')
            return
        # see if the item being looked at is on the ground at this location or in storage.
        #item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND][STORAGE])
        item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND])
        if item != None:
            print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
            return
        # see if the item being looked at is in the inventory
        item = getFirstItemMatchingDesc(lookingAt, inventory)
        if item != None:
            print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
            return
        print('You do not see that nearby.')
    def complete_look(self, text, line, begidx, endidx):
        possibleItems = []
        lookingAt = text.lower()
        # get a list of all "description words" for each item in the inventory
        invDescWords = getAllDescWords(inventory)
        groundDescWords = getAllDescWords(worldRooms[location][GROUND])
        for descWord in invDescWords + groundDescWords + [NORTH, SOUTH, EAST, WEST, UP, DOWN]:
            if line.startswith('look %s' % (descWord)):
                return []  # command is complete
        # if the user has only typed "look" but no item name, show all items on ground and directions:
        if lookingAt == '':
            # (was worldRooms[location][GROUND][STORAGE], which indexes a list with a string)
            possibleItems.extend(getAllFirstDescWords(worldRooms[location][GROUND]))
            for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
                if direction in worldRooms[location]:
                    possibleItems.append(direction)
            return list(set(possibleItems))  # make list unique
        # otherwise, get a list of all "description words" for ground items matching the command text so far:
        for descWord in groundDescWords:
            if descWord.startswith(lookingAt):
                possibleItems.append(descWord)
        # check for matching directions
        for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
            if direction.startswith(lookingAt):
                possibleItems.append(direction)
        # get a list of all "description words" for inventory items matching the command text so far:
        for descWord in invDescWords:
            if descWord.startswith(lookingAt):
                possibleItems.append(descWord)
        return list(set(possibleItems))  # make list unique

    # Extra ways of writing commands
    do_read = do_look
    do_l = do_look

    def do_eat(self, arg):
        """eat <item> - Eat an item in your inventory."""
        itemToEat = arg.lower()
        if itemToEat == '':
            print('Eat what? Type "inventory" or "inv" to see what\'s in your inventory.')
            return
        cantEat = False
        for item in getAllItemsMatchingDesc(itemToEat, inventory):
            if worldItems[item].get(EDIBLE, False) == False:
                cantEat = True
                continue  # there may be other items named this that you can eat, so we continue checking
            # NOTE - If you wanted to implement hunger levels, here is where
            # you would add code that changes the player's hunger level.
            print('You eat %s, may your bowels forever question your terrible choices.' % (worldItems[item][SHORTDESC]))
            inventory.remove(item)
            return
        if cantEat:
            print('I don\'t think the "%s" would like you to do that...' % (worldItems[item][SHORTDESC]))
        else:
            print('You do not have "%s". Type "inventory" or "inv" to see what\'s in your inventory.' % (itemToEat))

    def complete_eat(self, text, line, begidx, endidx):
        itemToEat = text.lower()
        possibleItems = []
        # if the user has only typed "eat" but no item name:
        if itemToEat == '':
            return getAllFirstDescWords(inventory)
        # otherwise, get a list of all "description words" for edible inventory items matching the command text so far:
        for item in list(set(inventory)):
            for descWord in worldItems[item][DESCWORDS]:
                if descWord.startswith(text) and worldItems[item].get(EDIBLE, False):
                    possibleItems.append(descWord)
        return list(set(possibleItems))  # make list unique

    do_exit = do_quit  # another way of exiting the game with a different word

    def do_clear(self, arg):
        """clear - Clear all text from the screen."""
        # platform.system is a function; it must be called to get the OS name.
        if platform.system() == "Windows":
            os.system("cls")
        else:
            os.system("clear")
###""" fake_error = ValueError('ERROR : The spacetime continum has been breached, all you know is a lie.') print(fake_error) print(bcolors.start + "The developer is sorry for this bug, we have deposited something nice for you in the chest." + bcolors.end) #worldItems[item][DESCWORDS] item = 'Troll' worldRooms['Main Hall'][STORAGE].append(item) worldRooms['Main Hall'][GROUND].remove(item) if __name__ == '__main__': print(bcolors.start + ' Escape the house ' + bcolors.end) print('====================') print("") print('[Type "help" for commands.]') print("") displayLocation(location, default) TextAdventureCmd().cmdloop() if len(STORAGE) > 15: print("You hear a soft click, could it be a trap?, Gnomes?!, *GASP* swamp folk... the worst, no its just the door opening, you are free to leave.") print("Congratulations are in order, you found evenything however turns out you could have just opened the door it was never locked at all... good job though..") print('Looks like you are going to be stuck here for a very, very long time.')
render_global.rs
use std::error;
use std::rc::Rc;
use std::cell::RefCell;
use std::ops::Deref;
use std::sync::Mutex;

use gl_bindings::gl;
use cgmath::{Matrix4, SquareMatrix, vec3, Point3, Rad};

use crate::demo;
use crate::utils::lazy_option::Lazy;
use crate::render::{Framebuffer, FramebufferAttachment, AttachmentPoint, ImageFormat, RenderSubsystem};
use crate::render::separable_sss::SeparableSSSSubsystem;
use crate::render::shader::managed::ManagedProgram;
use crate::asset::AssetPathBuf;

pub struct RenderGlobal {
	current_configuration: Rc<RefCell<GraphicsConfiguration>>,
	current_resolution: (u32, u32),
	
	separable_sss_system: SeparableSSSSubsystem,
	
	framebuffer_scene_hdr_ehaa: Option<Rc<RefCell<Framebuffer>>>,
	
	program_ehaa_scene: ManagedProgram,
	program_post_composite: ManagedProgram,
	
	frametime_query_object_gl: gl::uint,
	
	queued_shader_reload: bool,
}

impl RenderGlobal {
	pub fn new() -> RenderGlobal {
		RenderGlobal {
			current_configuration: Rc::new(RefCell::new(GraphicsConfiguration::new())),
			current_resolution: (0, 0),
			separable_sss_system: SeparableSSSSubsystem::new(),
			framebuffer_scene_hdr_ehaa: None,
			program_ehaa_scene: ManagedProgram::new(Some(AssetPathBuf::from("/shaders/legacy/main_scene_forward.program"))),
			program_post_composite: ManagedProgram::new(Some(AssetPathBuf::from("/shaders/post_composite.program"))),
			frametime_query_object_gl: 0,
			queued_shader_reload: false,
		}
	}
	
	pub fn initialize(&mut self, resolution: (u32, u32)) -> Result<(), Box<dyn error::Error>> {
		// Set initial resolution
		self.current_resolution = resolution;
		
		// Init subsystems
		self.separable_sss_system.initialize();
		
		// Do initial reconfiguration
		self.do_reconfigure_pipeline(self.current_resolution, false)?;
		
		Ok(())
	}
	
	pub fn do_reconfigure_pipeline(&mut self, new_resolution: (u32, u32), only_resize: bool) -> Result<(), Box<dyn error::Error>> {
		// Update state
		self.current_resolution = new_resolution;
		
		let config = RefCell::borrow(&self.current_configuration);
		let event = ReconfigureEvent {
			configuration: config.deref(),
			resolution: new_resolution,
			only_resize,
		};
		
		// Configure main fbo
		if let Some(t) = &mut self.framebuffer_scene_hdr_ehaa {
			let mut fbo = RefCell::borrow_mut(t);
			fbo.resize(event.resolution.0, event.resolution.1);
		} else {
			// Create fbo
			self.framebuffer_scene_hdr_ehaa = Some(Rc::new(RefCell::new({
				let mut fbo = Framebuffer::new(event.resolution.0, event.resolution.1);
				fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Depth, ImageFormat::get(gl::DEPTH_COMPONENT32F)));
				fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(0), ImageFormat::get(gl::R11F_G11F_B10F)));
				fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGB8)));
				fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(2), ImageFormat::get(gl::RGB8)));
//				fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGBA8)));
//				fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RG16_SNORM)));
				fbo.allocate();
				fbo
			})));
		}
		
		// Reconfigure subsystems
		self.separable_sss_system.reconfigure(event);
		
		// Drop config for now
		drop(config);
		
		// Create query object
		if self.frametime_query_object_gl == 0 {
			self.frametime_query_object_gl = unsafe {
				let mut query: gl::uint = 0;
				gl::CreateQueries(gl::TIME_ELAPSED, 1, &mut query);
				query
			};
		}
		
		// Load shaders
		self.reload_shaders();
		
		Ok(())
	}
	fn reload_shaders(&mut self) {
//		let asset_folder = demo::demo_instance().asset_folder.as_mut().unwrap();
		
		// Log
		println!("Reloading shaders!");
		
		// Reload shaders from asset
		self.program_ehaa_scene.reload_from_asset().expect("Failed to reload scene shader from asset");
		self.program_post_composite.reload_from_asset().expect("Failed to reload post composite shader from asset");
		
//		// Delete old shaders
//		if let Some(program) = self.program_ehaa_scene.take() {
//			let mut program = RefCell::borrow_mut(&program);
//			program.delete();
//		}
//		if let Some(program) = self.program_post_resolve.take() {
//			let mut program = RefCell::borrow_mut(&program);
//			program.delete();
//		}
		
		// Reload shader from assets
//		// Load shaders
//		self.program_ehaa_scene = Some({
//			let mut s = ShaderProgram::new_from_file(
//				&asset_folder.join("shaders/scene_ehaa.vert.glsl"),
//				&asset_folder.join("shaders/scene_ehaa.frag.glsl"),
//				Some(&asset_folder.join("shaders/scene_ehaa.tesseval.glsl"))
////				None
//			);
//			s.compile();
//			Rc::new(RefCell::new(s))
//		});
//		self.program_post_resolve = Some({
//			let mut s = ShaderProgram::new_from_file(
//				&asset_folder.join("shaders/post_resolve.vert.glsl"),
//				&asset_folder.join("shaders/post_resolve.frag.glsl"),
//				None
//			);
//			s.compile();
//			Rc::new(RefCell::new(s))
//		});
		
		// Reload subsystem shaders
		self.separable_sss_system.reload_shaders();
	}
	
	pub fn do_render_frame(&mut self) {
		// Reload shaders if needed
		if self.queued_shader_reload {
			self.queued_shader_reload = false;
			self.reload_shaders();
		}
		
		// Update cam state
		// LATER: Do this when rendering a scene: Get active camera from scene, make CameraState, calc proj matrix, pass state along in functions
		let active_camera = demo::demo_instance().get_test_camera();
		let active_camera = if let Some(cam) = active_camera.upgrade() {
			cam
		} else {
			// No active camera, so don't render anything for now
			return;
		};
		
		let camera_fovy: Rad<f32>;
		let camera_near_z: f32;
		let camera_far_z: f32;
		
		let cam_state = {
			let cam = Mutex::lock(&active_camera).unwrap();
			let mut state = RenderCameraState::new();
			
			// Get camera fovy
//			let projection: &dyn Any = cam.projection.as_ref();
//			let projection: &PerspectiveProjection = projection.downcast_ref::<PerspectiveProjection>().unwrap();
			camera_fovy = cam.projection.camera_fovy();
			
			let (near_z, far_z) = cam.projection.test_depth_planes();
			camera_near_z = near_z;
			camera_far_z = far_z;
			
			// Base matrix for our coordinate system (+Z forward, +Y up)
			let base_matrix = Matrix4::look_at_dir(Point3 {x: 0.0, y: 0.0, z: 0.0}, vec3(0.0, 0.0, 1.0), vec3(0.0, 1.0, 0.0)); // For some reason look_at_dir inverts the dir vector
			
			state.view_matrix = base_matrix * Matrix4::from(cam.rotation) * Matrix4::from_translation(-cam.translation);
			state.projection_matrix = cam.projection.projection_matrix(cam.viewport_size);
			
			state
		};
		let viewprojection_matrix = cam_state.projection_matrix * cam_state.view_matrix;
		
		// Recompile shaders
		if self.program_ehaa_scene.needs_recompile() {
			self.program_ehaa_scene.do_recompile();
		}
		if self.program_post_composite.needs_recompile() {
			self.program_post_composite.do_recompile();
		}
		
		unsafe {
			gl::Disable(gl::FRAMEBUFFER_SRGB);
			
			gl::Disable(gl::BLEND);
			
			gl::Enable(gl::CULL_FACE);
			gl::FrontFace(gl::CCW);
			gl::CullFace(gl::FRONT); // For some reason we need to cull FRONT. This might be due to reverse-z flipping the winding order?
			
			gl::Enable(gl::DEPTH_TEST);
			
			// Setup NDC z axis for reverse float depth
			gl::DepthFunc(gl::GREATER);
			gl::ClearDepth(0.0); // 0.0 is far with reverse z
			gl::ClipControl(gl::LOWER_LEFT, gl::ZERO_TO_ONE);
			gl::DepthRange(0.0, 1.0); // Standard (non-inversed) depth range, we use a reverse-z projection matrix instead
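			// Why reverse-z: a floating-point depth buffer has most of its precision
			// near 0.0, while the perspective divide clusters depth values near the far
			// plane; mapping near -> 1.0 and far -> 0.0 (together with the GREATER test
			// and ZERO_TO_ONE clip control above) spreads precision far more evenly over
			// the view distance. See the illustrative projection sketch at the end of
			// this file for the matrix such a setup pairs with.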
			// Use scene shader
			let scene_shader = self.program_ehaa_scene.program().unwrap();
			let scene_shader_gl = scene_shader.program_gl().unwrap();
			gl::UseProgram(scene_shader_gl);
			
			// Bind scene framebuffer
			let scene_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
			gl::BindFramebuffer(gl::FRAMEBUFFER, scene_fbo.handle_gl());
			
			// Set the viewport
			gl::Viewport(0, 0, self.current_resolution.0 as gl::sizei, self.current_resolution.1 as gl::sizei);
			
			gl::ClearColor(0.0, 0.0, 0.0, 0.0);
			gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
			
			{// Upload matrices
				let model_matrix = Matrix4::from_scale(1.0);
				let model_matrix_arr: [[f32; 4]; 4] = model_matrix.into();
				gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixModel\0".as_ptr() as *const gl::char), 1, gl::FALSE, model_matrix_arr.as_ptr() as *const gl::float);
				
				let view_matrix_arr: [[f32; 4]; 4] = cam_state.view_matrix.into();
				gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixView\0".as_ptr() as *const gl::char), 1, gl::FALSE, view_matrix_arr.as_ptr() as *const gl::float);
				
				let viewprojection_matrix_arr: [[f32; 4]; 4] = viewprojection_matrix.into();
				gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixViewProjection\0".as_ptr() as *const gl::char), 1, gl::FALSE, viewprojection_matrix_arr.as_ptr() as *const gl::float);
			}
			
			let start_frametimer = {// Start frametime timer
				let mut elapsed_frametime: u64 = std::u64::MAX;
				gl::GetQueryObjectui64v(self.frametime_query_object_gl, gl::QUERY_RESULT_NO_WAIT, &mut elapsed_frametime);
				
				if elapsed_frametime != std::u64::MAX {
					let _float_frametime = (elapsed_frametime as f64) / 1e6;
//					let title = format!("EHAA Demo ~ Frametime {} ms", float_frametime);
//					self.window.need_mut().set_title(title.as_str());
					
					// Restart query
					gl::BeginQuery(gl::TIME_ELAPSED, self.frametime_query_object_gl);
					true
				} else {
					false
				}
			};
			
			// Set tessellation state
			gl::PatchParameteri(gl::PATCH_VERTICES, 3);
			gl::PatchParameterfv(gl::PATCH_DEFAULT_OUTER_LEVEL, [1.0f32, 1.0f32, 1.0f32, 1.0f32].as_ptr());
			gl::PatchParameterfv(gl::PATCH_DEFAULT_INNER_LEVEL, [1.0f32, 1.0f32].as_ptr());
			
			gl::EnableVertexAttribArray(0);
//			gl::EnableVertexAttribArray(1);
//			gl::EnableVertexAttribArray(2);
			
			/*
			{// Draw teapot
				let test_teapot_vbo = demo::demo_instance().test_teapot_vbo.need();
				gl::BindBuffer(gl::ARRAY_BUFFER, test_teapot_vbo.vbo_gl);
				gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const gl::void);
				gl::DrawArrays(gl::PATCHES, 0, (crate::render::teapot::TEAPOT_VERTEX_DATA.len() / 3) as gl::sizei);
			}
			*/
			
			// /*
			{// Draw head model
				let test_head_model = demo::demo_instance().test_head_model.need();
				
				// Bind textures
				gl::BindTextureUnit(1, test_head_model.tex_albedo.texture_gl());
				gl::BindTextureUnit(2, test_head_model.tex_normal.texture_gl());
				gl::BindTextureUnit(4, test_head_model.tex_transmission.texture_gl());
				
				gl::BindBuffer(gl::ARRAY_BUFFER, test_head_model.vertex_buffer_gl);
//				let stride = 8*4;
				let stride = 12*4;
				gl::EnableVertexAttribArray(0);
				gl::EnableVertexAttribArray(1);
				gl::EnableVertexAttribArray(2);
				gl::EnableVertexAttribArray(3);
				gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, stride, 0 as *const gl::void); // vertex
				gl::VertexAttribPointer(1, 2, gl::FLOAT, gl::FALSE, stride, (3*4 + 3*4) as *const gl::void); // texcoord
				gl::VertexAttribPointer(2, 3, gl::FLOAT, gl::FALSE, stride, (3*4) as *const gl::void); // normal
				gl::VertexAttribPointer(3, 4, gl::FLOAT, gl::FALSE, stride, (3*4 + 3*4 + 2*4) as *const gl::void); // tangent
				
				gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, test_head_model.index_buffer_gl);
				gl::DrawElements(gl::PATCHES, test_head_model.num_indices as gl::sizei, gl::UNSIGNED_INT, 0 as *const gl::void);
//				gl::DrawElements(gl::TRIANGLES, self.test_head_model.need().num_indices as gl::GLsizei, gl::UNSIGNED_INT, 0 as *const std::ffi::c_void);
				
				gl::DisableVertexAttribArray(0);
				gl::DisableVertexAttribArray(1);
				gl::DisableVertexAttribArray(2);
				gl::DisableVertexAttribArray(3);
			}
			// */
			/*
			{// Draw debug triangles
				gl::Begin(gl::PATCHES);
				
//				gl::VertexAttrib3f(2, 1.0, 0.616, 0.984);
//				gl::VertexAttribI1ui(1, 0);
				gl::VertexAttrib3f(0, 0.0, 0.1, 0.0);
//				gl::VertexAttribI1ui(1, 1);
				gl::VertexAttrib3f(0, 0.5, 0.2, 0.0);
				let (mouse_x, mouse_y) = demo::demo_instance().window.need().get_cursor_pos();
//				gl::VertexAttribI1ui(1, 2);
				gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0);
//				gl::Vertex3f(0.1, 0.6 + 0.2*(std::time::UNIX_EPOCH.elapsed().unwrap().as_secs_f32()).sin(), 0.0);
//				gl::Vertex3f(0.1, 0.6, 0.0);
				
//				gl::VertexAttrib3f(2, 0.153, 0.0, 1.0);
//				gl::VertexAttribI1ui(1, 0);
				gl::VertexAttrib3f(0, 0.0, 0.1, 0.0);
//				gl::VertexAttribI1ui(1, 1);
				gl::VertexAttrib3f(0, 0.2, 0.6, 0.0);
//				gl::VertexAttribI1ui(1, 2);
//				gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0);
				
				gl::End();
			}
			*/
			
			{// Resolve separable sss
				let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
				let scene_hdr_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture);
				let scene_depth_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Depth).unwrap().texture);
				
				// Render ssss
				self.separable_sss_system.do_resolve_sss(&scene_hdr_rt, &scene_depth_rt, camera_fovy, (camera_near_z, camera_far_z));
			}
			
			{// Do ehaa resolve pass
				let post_resolve_shader = self.program_post_composite.program().unwrap();
				
//				// DEBUG: Blit framebuffer
//				gl::BlitNamedFramebuffer(self.framebuffer_scene_hdr_ehaa.need().handle_gl(), 0, 0, 0, 1280, 720, 0, 0, 1280, 720, gl::COLOR_BUFFER_BIT, gl::NEAREST);
				
				gl::Disable(gl::DEPTH_TEST);
				
				// Bind resolve shader
				gl::UseProgram(post_resolve_shader.program_gl().unwrap());
				
				// Bind textures
				let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
//				gl::BindTextureUnit(0, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl());
				gl::BindTextureUnit(0, RefCell::borrow(&self.separable_sss_system.fbo_resolve_final.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl());
				gl::BindTextureUnit(1, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(1)).unwrap().texture).texture_gl());
				gl::BindTextureUnit(2, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(2)).unwrap().texture).texture_gl());
				
				// Bind back buffer
				gl::BindFramebuffer(gl::FRAMEBUFFER, 0);
				
				// Draw an oversized fullscreen triangle
				gl::DisableVertexAttribArray(0);
				gl::DisableVertexAttribArray(1);
				gl::DisableVertexAttribArray(2);
				gl::DrawArrays(gl::TRIANGLES, 0, 3);
			}
			
			// End frametimer query
			if start_frametimer {
				gl::EndQuery(gl::TIME_ELAPSED);
			}
		}
	}
	
	pub fn queue_shader_reload(&mut self) {
		self.queued_shader_reload = true;
	}
}

pub struct GraphicsConfiguration {
}
impl GraphicsConfiguration {
	pub fn new() -> GraphicsConfiguration {
		GraphicsConfiguration {}
	}
}

pub struct ReconfigureEvent<'a> {
	pub configuration: &'a GraphicsConfiguration,
	pub resolution: (u32, u32),
	pub only_resize: bool,
}

pub struct RenderCameraState {
	pub projection_matrix: Matrix4<f32>,
	pub view_matrix: Matrix4<f32>,
}

impl RenderCameraState {
	pub fn new() -> RenderCameraState {
		RenderCameraState {
			projection_matrix: Matrix4::identity(),
			view_matrix: Matrix4::identity(),
		}
	}
}
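// A minimal sketch of the kind of reverse-z projection matrix the depth state in
// do_render_frame (GREATER test, ClearDepth(0.0), ZERO_TO_ONE clip control) pairs
// with. This is an illustration only, not the projection actually used here: the
// real matrix comes from cam.projection.projection_matrix(). It assumes the
// +Z-forward view space set up above and an infinite far plane.
#[allow(dead_code)]
fn reverse_z_infinite_projection(fovy: Rad<f32>, aspect: f32, near: f32) -> Matrix4<f32> {
	let f = 1.0 / (fovy.0 * 0.5).tan();
	// Column-major constructor. Resulting depth = z_clip / w_clip = near / z_eye,
	// which is 1.0 at z_eye == near and approaches 0.0 as z_eye -> infinity.
	Matrix4::new(
		f / aspect, 0.0, 0.0,  0.0, // column 0
		0.0,        f,   0.0,  0.0, // column 1
		0.0,        0.0, 0.0,  1.0, // column 2: w_clip = z_eye
		0.0,        0.0, near, 0.0, // column 3: z_clip = near
	)
}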
gl::UseProgram(post_resolve_shader.program_gl().unwrap()); // Bind shaders let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need()); // gl::BindTextureUnit(0, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl()); gl::BindTextureUnit(0, RefCell::borrow(&self.separable_sss_system.fbo_resolve_final.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl()); gl::BindTextureUnit(1, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(1)).unwrap().texture).texture_gl()); gl::BindTextureUnit(2, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(2)).unwrap().texture).texture_gl()); // Bind back buffer gl::BindFramebuffer(gl::FRAMEBUFFER, 0); // Draw oversized fullscreen triangles gl::DisableVertexAttribArray(0); gl::DisableVertexAttribArray(1); gl::DisableVertexAttribArray(2); gl::DrawArrays(gl::TRIANGLES, 0, 3); } // End frametimer query if start_frametimer { gl::EndQuery(gl::TIME_ELAPSED); } } } pub fn queue_shader_reload(&mut self) { self.queued_shader_reload = true; } } pub struct GraphicsConfiguration { } impl GraphicsConfiguration { pub fn new() -> GraphicsConfiguration { GraphicsConfiguration {} } } pub struct ReconfigureEvent<'a> { pub configuration: &'a GraphicsConfiguration, pub resolution: (u32, u32), pub only_resize: bool, } pub struct RenderCameraState { pub projection_matrix: Matrix4<f32>, pub view_matrix: Matrix4<f32>, } impl RenderCameraState { pub fn new() -> RenderCameraState { RenderCameraState { projection_matrix: Matrix4::identity(), view_matrix: Matrix4::identity(), } } }
{ let mut fbo = RefCell::borrow_mut(t); fbo.resize(event.resolution.0, event.resolution.1); }
conditional_block
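The GL state set up in the render_global.rs sample above (DepthFunc(GREATER), ClearDepth(0.0), ClipControl(..., ZERO_TO_ONE)) only works together with a reverse-z projection matrix, one that maps the near plane to depth 1.0 and the far plane toward 0.0, as the sample's own comments note. A minimal sketch of such a matrix, assuming a symmetric right-handed frustum and an infinite far plane; the function name and the numpy dependency are illustrative, not part of the sample:

import numpy as np

def perspective_reverse_z_infinite(fovy_rad, aspect, z_near):
    # Maps view-space z = -z_near to NDC depth 1.0 and z -> -infinity to 0.0,
    # which pairs with glDepthFunc(GL_GREATER), glClearDepth(0.0) and
    # glClipControl(GL_LOWER_LEFT, GL_ZERO_TO_ONE) as in the sample above.
    f = 1.0 / np.tan(fovy_rad / 2.0)
    return np.array([
        [f / aspect, 0.0,  0.0, 0.0],
        [0.0,        f,    0.0, 0.0],
        [0.0,        0.0,  0.0, z_near],   # clip.z = z_near (constant)
        [0.0,        0.0, -1.0, 0.0],      # clip.w = -z_view
    ])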
render_global.rs
use std::error; use std::rc::Rc; use std::cell::RefCell; use std::ops::Deref; use std::sync::Mutex; use gl_bindings::gl; use cgmath::{Matrix4, SquareMatrix, vec3, Point3, Rad}; use crate::demo; use crate::utils::lazy_option::Lazy; use crate::render::{Framebuffer, FramebufferAttachment, AttachmentPoint, ImageFormat, RenderSubsystem}; use crate::render::separable_sss::SeparableSSSSubsystem; use crate::render::shader::managed::ManagedProgram; use crate::asset::AssetPathBuf; pub struct RenderGlobal { current_configuration: Rc<RefCell<GraphicsConfiguration>>, current_resolution: (u32, u32), separable_sss_system: SeparableSSSSubsystem, framebuffer_scene_hdr_ehaa: Option<Rc<RefCell<Framebuffer>>>, program_ehaa_scene: ManagedProgram, program_post_composite: ManagedProgram, frametime_query_object_gl: gl::uint, queued_shader_reload: bool, } impl RenderGlobal { pub fn new() -> RenderGlobal { RenderGlobal { current_configuration: Rc::new(RefCell::new(GraphicsConfiguration::new())), current_resolution: (0, 0), separable_sss_system: SeparableSSSSubsystem::new(), framebuffer_scene_hdr_ehaa: None, program_ehaa_scene: ManagedProgram::new(Some(AssetPathBuf::from("/shaders/legacy/main_scene_forward.program"))), program_post_composite: ManagedProgram::new(Some(AssetPathBuf::from("/shaders/post_composite.program"))), frametime_query_object_gl: 0, queued_shader_reload: false, } } pub fn initialize(&mut self, resolution: (u32, u32)) -> Result<(), Box<dyn error::Error>> { // Set initial resolution self.current_resolution = resolution; // Init subsystems self.separable_sss_system.initialize(); // Do initial reconfiguration self.do_reconfigure_pipeline(self.current_resolution, false)?; Ok(()) } pub fn do_reconfigure_pipeline(&mut self, new_resolution: (u32, u32), only_resize: bool) -> Result<(), Box<dyn error::Error>> { // Update state self.current_resolution = new_resolution; let config = RefCell::borrow(&self.current_configuration); let event = ReconfigureEvent { configuration: config.deref(), resolution: new_resolution, only_resize, }; // Configure main fbo if let Some(t) = &mut self.framebuffer_scene_hdr_ehaa { let mut fbo = RefCell::borrow_mut(t); fbo.resize(event.resolution.0, event.resolution.1); } else { // Create fbo self.framebuffer_scene_hdr_ehaa = Some(Rc::new(RefCell::new({ let mut fbo = Framebuffer::new(event.resolution.0, event.resolution.1); fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Depth, ImageFormat::get(gl::DEPTH_COMPONENT32F))); fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(0), ImageFormat::get(gl::R11F_G11F_B10F))); fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGB8))); fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(2), ImageFormat::get(gl::RGB8))); // fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGBA8))); // fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RG16_SNORM))); fbo.allocate(); fbo }))); } // Reconfigure subsystems self.separable_sss_system.reconfigure(event); // Drop config for now drop(config); // Create query object if self.frametime_query_object_gl == 0 { self.frametime_query_object_gl = unsafe { let mut query: gl::uint = 0; gl::CreateQueries(gl::TIME_ELAPSED, 1, &mut query); query }; } // Load shaders self.reload_shaders(); Ok(()) } fn
(&mut self) { // let asset_folder = demo::demo_instance().asset_folder.as_mut().unwrap(); // Log println!("Reloading shaders!"); // Reload shaders from asset self.program_ehaa_scene.reload_from_asset().expect("Failed to reload scene shader from asset"); self.program_post_composite.reload_from_asset().expect("Failed to reload post composite shader from asset"); // // Delete old shaders // if let Some(program) = self.program_ehaa_scene.take() { // let mut program = RefCell::borrow_mut(&program); // program.delete(); // } // if let Some(program) = self.program_post_resolve.take() { // let mut program = RefCell::borrow_mut(&program); // program.delete(); // } // Reload shader from assets // // Load shaders // self.program_ehaa_scene = Some({ // let mut s = ShaderProgram::new_from_file( // &asset_folder.join("shaders/scene_ehaa.vert.glsl"), // &asset_folder.join("shaders/scene_ehaa.frag.glsl"), // Some(&asset_folder.join("shaders/scene_ehaa.tesseval.glsl")) //// None // ); // s.compile(); // Rc::new(RefCell::new(s)) // }); // self.program_post_resolve = Some({ // let mut s = ShaderProgram::new_from_file( // &asset_folder.join("shaders/post_resolve.vert.glsl"), // &asset_folder.join("shaders/post_resolve.frag.glsl"), // None // ); // s.compile(); // Rc::new(RefCell::new(s)) // }); // Reload subsystem shaders self.separable_sss_system.reload_shaders(); } pub fn do_render_frame(&mut self) { // Reload shaders if needed if self.queued_shader_reload { self.queued_shader_reload = false; self.reload_shaders(); } // Update cam state // LATER: Do this when rendering a scene: Get active camera from scene, make CameraState, calc proj matrix, pass state along in functions let active_camera = demo::demo_instance().get_test_camera(); let active_camera = if let Some(cam) = active_camera.upgrade() { cam } else { // No active camera, so don't render anything for now return; }; let camera_fovy: Rad<f32>; let camera_near_z: f32; let camera_far_z: f32; let cam_state = { let cam = Mutex::lock(&active_camera).unwrap(); let mut state = RenderCameraState::new(); // Get camera fovy // let projection: &dyn Any = cam.projection.as_ref(); // let projection: &PerspectiveProjection = projection.downcast_ref::<PerspectiveProjection>().unwrap(); camera_fovy = cam.projection.camera_fovy(); let (near_z, far_z) = cam.projection.test_depth_planes(); camera_near_z = near_z; camera_far_z = far_z; // Base matrix for our coordinate system ( let base_matrix = Matrix4::look_at_dir(Point3 {x: 0.0, y: 0.0, z: 0.0}, vec3(0.0, 0.0, 1.0), vec3(0.0, 1.0, 0.0)); // For some reason look_at_dir inverts the dir vector state.view_matrix = base_matrix * Matrix4::from(cam.rotation) * Matrix4::from_translation(-cam.translation); state.projection_matrix = cam.projection.projection_matrix(cam.viewport_size); state }; let viewprojection_matrix = cam_state.projection_matrix * cam_state.view_matrix; // Recompile shaders if self.program_ehaa_scene.needs_recompile() { self.program_ehaa_scene.do_recompile(); } if self.program_post_composite.needs_recompile() { self.program_post_composite.do_recompile(); } unsafe { gl::Disable(gl::FRAMEBUFFER_SRGB); gl::Disable(gl::BLEND); gl::Enable(gl::CULL_FACE); gl::FrontFace(gl::CCW); gl::CullFace(gl::FRONT); // For some reason we need to cull FRONT. This might be due to reverse-z flipping the winding order? 
gl::Enable(gl::DEPTH_TEST); // Setup NDC z axis for reverse float depth gl::DepthFunc(gl::GREATER); gl::ClearDepth(0.0); // 0.0 is far with reverse z gl::ClipControl(gl::LOWER_LEFT, gl::ZERO_TO_ONE); gl::DepthRange(0.0, 1.0); // Standard (non-inversed) depth range, we use a reverse-z projection matrix instead // Use scene shader let scene_shader = self.program_ehaa_scene.program().unwrap(); let scene_shader_gl = scene_shader.program_gl().unwrap(); gl::UseProgram(scene_shader_gl); // Bind scene framebuffer let scene_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need()); gl::BindFramebuffer(gl::FRAMEBUFFER, scene_fbo.handle_gl()); // Set the viewport gl::Viewport(0, 0, self.current_resolution.0 as gl::sizei, self.current_resolution.1 as gl::sizei); gl::ClearColor(0.0, 0.0, 0.0, 0.0); gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT); {// Upload matrices let model_matrix = Matrix4::from_scale(1.0); let model_matrix_arr: [[f32; 4]; 4] = model_matrix.into(); gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixModel\0".as_ptr() as *const gl::char), 1, gl::FALSE, model_matrix_arr.as_ptr() as *const gl::float); let view_matrix_arr: [[f32; 4]; 4] = cam_state.view_matrix.into(); gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixView\0".as_ptr() as *const gl::char), 1, gl::FALSE, view_matrix_arr.as_ptr() as *const gl::float); let viewprojection_matrix_arr: [[f32; 4]; 4] = viewprojection_matrix.into(); gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixViewProjection\0".as_ptr() as *const gl::char), 1, gl::FALSE, viewprojection_matrix_arr.as_ptr() as *const gl::float); } let start_frametimer = {// Start frametime timer let mut elapsed_frametime: u64 = std::u64::MAX; gl::GetQueryObjectui64v(self.frametime_query_object_gl, gl::QUERY_RESULT_NO_WAIT, &mut elapsed_frametime); if elapsed_frametime != std::u64::MAX { let _float_frametime = (elapsed_frametime as f64) / 1e6; // let title = format!("EHAA Demo ~ Frametime {} ms", float_frametime); // self.window.need_mut().set_title(title.as_str()); // Restart query gl::BeginQuery(gl::TIME_ELAPSED, self.frametime_query_object_gl); true } else { false } }; // Set tessellation state gl::PatchParameteri(gl::PATCH_VERTICES, 3); gl::PatchParameterfv(gl::PATCH_DEFAULT_OUTER_LEVEL, [1.0f32, 1.0f32, 1.0f32, 1.0f32].as_ptr()); gl::PatchParameterfv(gl::PATCH_DEFAULT_INNER_LEVEL, [1.0f32, 1.0f32].as_ptr()); gl::EnableVertexAttribArray(0); // gl::EnableVertexAttribArray(1); // gl::EnableVertexAttribArray(2); /* {// Draw teapot let test_teapot_vbo = demo::demo_instance().test_teapot_vbo.need(); gl::BindBuffer(gl::ARRAY_BUFFER, test_teapot_vbo.vbo_gl); gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const gl::void); gl::DrawArrays(gl::PATCHES, 0, (crate::render::teapot::TEAPOT_VERTEX_DATA.len() / 3) as gl::sizei); } */ // /* {// Draw head model let test_head_model = demo::demo_instance().test_head_model.need(); // Bind textures gl::BindTextureUnit(1, test_head_model.tex_albedo.texture_gl()); gl::BindTextureUnit(2, test_head_model.tex_normal.texture_gl()); gl::BindTextureUnit(4, test_head_model.tex_transmission.texture_gl()); gl::BindBuffer(gl::ARRAY_BUFFER, test_head_model.vertex_buffer_gl); // let stride = 8*4; let stride = 12*4; gl::EnableVertexAttribArray(0); gl::EnableVertexAttribArray(1); gl::EnableVertexAttribArray(2); gl::EnableVertexAttribArray(3); gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, stride, 0 as *const gl::void); // vertex gl::VertexAttribPointer(1, 2, gl::FLOAT, 
gl::FALSE, stride, (3*4 + 3*4) as *const gl::void); // texcoord gl::VertexAttribPointer(2, 3, gl::FLOAT, gl::FALSE, stride, (3*4) as *const gl::void); // normal gl::VertexAttribPointer(3, 4, gl::FLOAT, gl::FALSE, stride, (3*4 + 3*4 + 2*4) as *const gl::void); // tangent gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, test_head_model.index_buffer_gl); gl::DrawElements(gl::PATCHES, test_head_model.num_indices as gl::sizei, gl::UNSIGNED_INT, 0 as *const gl::void); // gl::DrawElements(gl::TRIANGLES, self.test_head_model.need().num_indices as gl::GLsizei, gl::UNSIGNED_INT, 0 as *const std::ffi::c_void); gl::DisableVertexAttribArray(0); gl::DisableVertexAttribArray(1); gl::DisableVertexAttribArray(2); gl::DisableVertexAttribArray(3); } // */ /* {// Draw debug triangles gl::Begin(gl::PATCHES); // gl::VertexAttrib3f(2, 1.0, 0.616, 0.984); // gl::VertexAttribI1ui(1, 0); gl::VertexAttrib3f(0, 0.0, 0.1, 0.0); // gl::VertexAttribI1ui(1, 1); gl::VertexAttrib3f(0, 0.5, 0.2, 0.0); let (mouse_x, mouse_y) = demo::demo_instance().window.need().get_cursor_pos(); // gl::VertexAttribI1ui(1, 2); gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0); // gl::Vertex3f(0.1, 0.6 + 0.2*(std::time::UNIX_EPOCH.elapsed().unwrap().as_secs_f32()).sin(), 0.0); // gl::Vertex3f(0.1, 0.6, 0.0); // gl::VertexAttrib3f(2, 0.153, 0.0, 1.0); // gl::VertexAttribI1ui(1, 0); gl::VertexAttrib3f(0, 0.0, 0.1, 0.0); // gl::VertexAttribI1ui(1, 1); gl::VertexAttrib3f(0, 0.2, 0.6, 0.0); // gl::VertexAttribI1ui(1, 2); // gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0); gl::End(); } */ {// Resolve separable sss let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need()); let scene_hdr_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture); let scene_depth_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Depth).unwrap().texture); // Render ssss self.separable_sss_system.do_resolve_sss(&scene_hdr_rt, &scene_depth_rt, camera_fovy, (camera_near_z, camera_far_z)); } {// Do ehaa resolve pass let post_resolve_shader = self.program_post_composite.program().unwrap(); // // DEBUG: Blit framebuffer // gl::BlitNamedFramebuffer(self.framebuffer_scene_hdr_ehaa.need().handle_gl(), 0, 0, 0, 1280, 720, 0, 0, 1280, 720, gl::COLOR_BUFFER_BIT, gl::NEAREST); gl::Disable(gl::DEPTH_TEST); // Bind resolve shader gl::UseProgram(post_resolve_shader.program_gl().unwrap()); // Bind shaders let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need()); // gl::BindTextureUnit(0, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl()); gl::BindTextureUnit(0, RefCell::borrow(&self.separable_sss_system.fbo_resolve_final.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl()); gl::BindTextureUnit(1, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(1)).unwrap().texture).texture_gl()); gl::BindTextureUnit(2, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(2)).unwrap().texture).texture_gl()); // Bind back buffer gl::BindFramebuffer(gl::FRAMEBUFFER, 0); // Draw oversized fullscreen triangles gl::DisableVertexAttribArray(0); gl::DisableVertexAttribArray(1); gl::DisableVertexAttribArray(2); gl::DrawArrays(gl::TRIANGLES, 0, 3); } // End frametimer query if start_frametimer { gl::EndQuery(gl::TIME_ELAPSED); } } } pub fn queue_shader_reload(&mut self) { self.queued_shader_reload = true; } } pub struct GraphicsConfiguration { } 
impl GraphicsConfiguration { pub fn new() -> GraphicsConfiguration { GraphicsConfiguration {} } } pub struct ReconfigureEvent<'a> { pub configuration: &'a GraphicsConfiguration, pub resolution: (u32, u32), pub only_resize: bool, } pub struct RenderCameraState { pub projection_matrix: Matrix4<f32>, pub view_matrix: Matrix4<f32>, } impl RenderCameraState { pub fn new() -> RenderCameraState { RenderCameraState { projection_matrix: Matrix4::identity(), view_matrix: Matrix4::identity(), } } }
reload_shaders
identifier_name
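Each record in this dump is a fill-in-the-middle sample: prefix and suffix give the surrounding code, middle is the span a model must produce, and fim_type names the splitting strategy (conditional_block masks an if/else arm; identifier_name masks a single name such as reload_shaders above). Restoring the original file is plain concatenation; the dict-style row access below is an assumption about how the dump is loaded, not part of the data:

def reconstruct_source(row):
    # The original file text is the three fields glued back together.
    # 'row' is assumed to be a dict-like record using the schema's field names.
    return row["prefix"] + row["middle"] + row["suffix"]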
spinup.py
#!/usr/bin/env python # Copyright (C) 2017 Andy Aschwanden import itertools from collections import OrderedDict import os try: import subprocess32 as sub except: import subprocess as sub from argparse import ArgumentParser import sys sys.path.append("../resources/") from resources import * grid_choices = [5000, 2000, 1000, 500, 250, 100] # set up the option parser parser = ArgumentParser() parser.description = "Generating scripts for model calibration." parser.add_argument( "-n", "--n_procs", dest="n", type=int, help="""number of cores/processors. default=28.""", default=28 ) parser.add_argument("-w", "--wall_time", dest="walltime", help="""walltime. default: 8:00:00.""", default="8:00:00") parser.add_argument( "-q", "--queue", dest="queue", choices=list_queues(), help="""queue. default=long.""", default="long" ) parser.add_argument("-d", "--domain", dest="domain", choices=["og"], help="sets the modeling domain", default="og") parser.add_argument("--exstep", dest="exstep", type=int, help="Writing interval for spatial time series", default=10) parser.add_argument( "-f", "--o_format", dest="oformat", choices=["netcdf3", "netcdf4_parallel", "pnetcdf"], help="output format", default="netcdf3", ) parser.add_argument( "-g", "--grid", dest="grid", type=int, choices=grid_choices, help="horizontal grid resolution", default=2000 ) parser.add_argument("--o_dir", dest="odir", help="output directory. Default: current directory", default="foo") parser.add_argument( "--o_size", dest="osize", choices=["small", "medium", "big", "big_2d"], help="output size type", default="medium" ) parser.add_argument( "-s", "--system", dest="system", choices=list_systems(), help="computer system to use.", default="pleiades_broadwell", ) parser.add_argument( "--calving", dest="calving", choices=["float_kill", "ocean_kill", "eigen_calving", "thickness_calving", "vonmises_calving", "hybrid_calving"], help="calving mechanism", default="vonmises_calving", ) parser.add_argument( "--frontal_melt", dest="frontal_melt", action="store_true", help="Turn on frontal melt", default=False ) parser.add_argument( "--forcing_type", dest="forcing_type", choices=["ctrl", "e_age"], help="output size type", default="ctrl" ) parser.add_argument( "--hydrology", dest="hydrology", choices=["null", "diffuse", "routing"], help="Basal hydrology model.", default="diffuse", ) parser.add_argument( "-p", "--params", dest="params_list", help="Comma-separated list with params for sensitivity", default=None ) parser.add_argument( "--stable_gl", dest="float_kill_calve_near_grounding_line", action="store_true", help="Stable grounding line", default=False, ) parser.add_argument( "--stress_balance", dest="stress_balance", choices=["sia", "ssa+sia", "ssa"], help="stress balance solver", default="ssa+sia", ) parser.add_argument( "--vertical_velocity_approximation", dest="vertical_velocity_approximation", choices=["centered", "upstream"], help="How to approximate vertical velocities", default="upstream", ) parser.add_argument("--start_year", dest="start_year", type=int, help="Simulation start year", default=0) parser.add_argument("--end_year", dest="end_year", type=int, help="Simulation end year", default=10000) options = parser.parse_args() nn = options.n odir = options.odir oformat = options.oformat osize = options.osize queue = options.queue walltime = options.walltime system = options.system calving = options.calving climate = "elevation" exstep = options.exstep float_kill_calve_near_grounding_line = options.float_kill_calve_near_grounding_line forcing_type = 
options.forcing_type frontal_melt = options.frontal_melt grid = options.grid hydrology = options.hydrology ocean = "const" stress_balance = options.stress_balance vertical_velocity_approximation = options.vertical_velocity_approximation # Check which parameters are used for sensitivity study params_list = options.params_list do_T_max = False do_eigen_calving_k = False do_fice = False do_fsnow = False if params_list is not None: params = params_list.split(",") if "T_max" in params: do_T_max = True if "eigen_calving_k" in params: do_eigen_calving_k = True if "fice" in params: do_fice = True if "fsnow" in params: do_fsnow = True domain = options.domain pism_exec = generate_domain(domain) pism_dataname = "pism_outletglacier_g{}m.nc".format(grid) pism_config = "init_config" pism_config_nc = ".".join([pism_config, "nc"]) pism_config_cdl = os.path.join("../config", ".".join([pism_config, "cdl"])) # Anaconda libssl problem on chinook if system in ("chinook"): ncgen = "/usr/bin/ncgen" else:
cmd = [ncgen, "-o", pism_config_nc, pism_config_cdl] sub.call(cmd) if not os.path.isdir(odir): os.mkdir(odir) state_dir = "state" scalar_dir = "scalar" spatial_dir = "spatial" for tsdir in (scalar_dir, spatial_dir, state_dir): if not os.path.isdir(os.path.join(odir, tsdir)): os.mkdir(os.path.join(odir, tsdir)) odir_tmp = "_".join([odir, "tmp"]) if not os.path.isdir(odir_tmp): os.mkdir(odir_tmp) # ######################################################## # set up model initialization # ######################################################## ssa_e = 1.0 ssa_n_values = [3.25] sia_e_values = [3] ppq_values = [0.6] tefo_values = [0.020] phi_min_values = [15.0] phi_max_values = [45.0] topg_min_values = [-700] topg_max_values = [1000] combinations = list( itertools.product( sia_e_values, ssa_n_values, ppq_values, tefo_values, phi_min_values, phi_max_values, topg_min_values, topg_max_values, ) ) tsstep = "yearly" scripts = [] scripts_post = [] simulation_start_year = options.start_year simulation_end_year = options.end_year for n, combination in enumerate(combinations): sia_e, ssa_n, ppq, tefo, phi_min, phi_max, topg_min, topg_max = combination ttphi = "{},{},{},{}".format(phi_min, phi_max, topg_min, topg_max) name_options = OrderedDict() name_options["sia_e"] = sia_e name_options["ssa_n"] = ssa_n name_options["ppq"] = ppq name_options["tefo"] = tefo # name_options['phi_min'] = phi_min # name_options['phi_max'] = phi_max # name_options['topg_min'] = topg_min # name_options['topg_max'] = topg_max name_options["calving"] = calving full_exp_name = "_".join(["_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())])]) full_outfile = "{domain}_g{grid}m_{experiment}.nc".format( domain=domain.lower(), grid=grid, experiment=full_exp_name ) experiment = "_".join( [ climate, "_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())]), "{}".format(simulation_start_year), "{}".format(simulation_end_year), ] ) # All runs in one script file for coarse grids that fit into max walltime script = "init_{}_g{}m_{}.sh".format(domain.lower(), grid, full_exp_name) scripts.append(script) script_post = "init_{}_g{}m_{}_post.sh".format(domain.lower(), grid, full_exp_name) scripts_post.append(script_post) for filename in script: try: os.remove(filename) except OSError: pass batch_header, batch_system = make_batch_header(system, nn, walltime, queue) with open(script, "w") as f: f.write(batch_header) outfile = "{domain}_g{grid}m_{experiment}.nc".format(domain=domain.lower(), grid=grid, experiment=experiment) prefix = generate_prefix_str(pism_exec) general_params_dict = OrderedDict() general_params_dict["bootstrap"] = "" general_params_dict["i"] = pism_dataname general_params_dict["ys"] = simulation_start_year general_params_dict["ye"] = simulation_end_year general_params_dict["o"] = os.path.join(odir, state_dir, outfile) general_params_dict["o_format"] = oformat general_params_dict["o_size"] = osize general_params_dict["config_override"] = pism_config_nc grid_params_dict = generate_grid_description(grid, domain) sb_params_dict = OrderedDict() sb_params_dict["sia_e"] = sia_e sb_params_dict["ssa_e"] = ssa_e sb_params_dict["ssa_n"] = ssa_n sb_params_dict["ssa_dirichlet_bc"] = "" sb_params_dict["pseudo_plastic_q"] = ppq sb_params_dict["till_effective_fraction_overburden"] = tefo sb_params_dict["topg_to_phi"] = ttphi sb_params_dict["vertical_velocity_approximation"] = vertical_velocity_approximation stress_balance_params_dict = generate_stress_balance(stress_balance, sb_params_dict) climate_params_dict 
= generate_climate( climate, climatic_mass_balance="-2.5,3,200,1500,2000", ice_surface_temp="-5,-20,0,2000" ) ocean_params_dict = generate_ocean(ocean) hydro_params_dict = generate_hydrology(hydrology) calving_params_dict = generate_calving(calving, ocean_kill_file=pism_dataname, thickness_calving_threshold=200) exvars = default_spatial_ts_vars() spatial_ts_dict = generate_spatial_ts(full_outfile, exvars, exstep, odir=odir_tmp, split=True) scalar_ts_dict = generate_scalar_ts( outfile, tsstep, start=simulation_start_year, end=simulation_end_year, odir=os.path.join(odir, scalar_dir) ) all_params_dict = merge_dicts( general_params_dict, grid_params_dict, stress_balance_params_dict, climate_params_dict, ocean_params_dict, hydro_params_dict, calving_params_dict, spatial_ts_dict, scalar_ts_dict, ) all_params = " ".join([" ".join(["-" + k, str(v)]) for k, v in list(all_params_dict.items())]) if system in ("debug"): cmd = " ".join( [ batch_system["mpido"], prefix, all_params, "2>&1 | tee {outdir}/job.${batch}".format(outdir=odir, batch=batch_system["job_id"]), ] ) else: cmd = " ".join( [ batch_system["mpido"], prefix, all_params, "> {outdir}/job.${batch} 2>&1".format(outdir=odir, batch=batch_system["job_id"]), ] ) f.write(cmd) f.write("\n") f.write("\n") f.write("{} {}\n".format(batch_system["submit"], script_post)) f.write("\n") post_header = make_batch_post_header(system) with open(script_post, "w") as f: f.write(post_header) extra_file = spatial_ts_dict["extra_file"] myfiles = " ".join( [ "{}_{}.000.nc".format(extra_file, k) for k in range(simulation_start_year + exstep, simulation_end_year, exstep) ] ) myoutfile = extra_file + ".nc" myoutfile = os.path.join(odir, spatial_dir, os.path.split(myoutfile)[-1]) cmd = " ".join(["ncrcat -O -6 -h", myfiles, myoutfile, "\n"]) f.write(cmd) scripts = uniquify_list(scripts) scripts_post = uniquify_list(scripts_post) print("\n".join([script for script in scripts])) print("\nwritten\n") print("\n".join([script for script in scripts_post])) print("\nwritten\n")
ncgen = "ncgen"
conditional_block
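The masked middle of this spinup.py sample is the fallback branch ncgen = "ncgen" paired with the hard-coded /usr/bin/ncgen workaround for chinook. A hedged sketch of the same decision using shutil.which, shown only as an alternative pattern, not as what spinup.py does:

import shutil

def find_ncgen():
    # Prefer the system binary (the Anaconda libssl workaround in the sample),
    # otherwise take whatever ncgen the PATH resolves to.
    if shutil.which("/usr/bin/ncgen"):
        return "/usr/bin/ncgen"
    return shutil.which("ncgen") or "ncgen"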
spinup.py
#!/usr/bin/env python # Copyright (C) 2017 Andy Aschwanden import itertools from collections import OrderedDict import os try: import subprocess32 as sub except: import subprocess as sub from argparse import ArgumentParser import sys sys.path.append("../resources/") from resources import * grid_choices = [5000, 2000, 1000, 500, 250, 100] # set up the option parser parser = ArgumentParser() parser.description = "Generating scripts for model calibration." parser.add_argument( "-n", "--n_procs", dest="n", type=int, help="""number of cores/processors. default=28.""", default=28 ) parser.add_argument("-w", "--wall_time", dest="walltime", help="""walltime. default: 8:00:00.""", default="8:00:00") parser.add_argument( "-q", "--queue", dest="queue", choices=list_queues(), help="""queue. default=long.""", default="long" ) parser.add_argument("-d", "--domain", dest="domain", choices=["og"], help="sets the modeling domain", default="og") parser.add_argument("--exstep", dest="exstep", type=int, help="Writing interval for spatial time series", default=10) parser.add_argument( "-f", "--o_format", dest="oformat", choices=["netcdf3", "netcdf4_parallel", "pnetcdf"], help="output format", default="netcdf3", ) parser.add_argument( "-g", "--grid", dest="grid", type=int, choices=grid_choices, help="horizontal grid resolution", default=2000 ) parser.add_argument("--o_dir", dest="odir", help="output directory. Default: current directory", default="foo") parser.add_argument( "--o_size", dest="osize", choices=["small", "medium", "big", "big_2d"], help="output size type", default="medium" ) parser.add_argument( "-s", "--system", dest="system", choices=list_systems(), help="computer system to use.", default="pleiades_broadwell", ) parser.add_argument( "--calving", dest="calving", choices=["float_kill", "ocean_kill", "eigen_calving", "thickness_calving", "vonmises_calving", "hybrid_calving"], help="calving mechanism", default="vonmises_calving", ) parser.add_argument( "--frontal_melt", dest="frontal_melt", action="store_true", help="Turn on frontal melt", default=False ) parser.add_argument( "--forcing_type", dest="forcing_type", choices=["ctrl", "e_age"], help="output size type", default="ctrl" ) parser.add_argument( "--hydrology", dest="hydrology", choices=["null", "diffuse", "routing"], help="Basal hydrology model.", default="diffuse", ) parser.add_argument( "-p", "--params", dest="params_list", help="Comma-separated list with params for sensitivity", default=None ) parser.add_argument( "--stable_gl", dest="float_kill_calve_near_grounding_line", action="store_true", help="Stable grounding line", default=False, ) parser.add_argument( "--stress_balance", dest="stress_balance", choices=["sia", "ssa+sia", "ssa"], help="stress balance solver", default="ssa+sia", ) parser.add_argument( "--vertical_velocity_approximation", dest="vertical_velocity_approximation", choices=["centered", "upstream"], help="How to approximate vertical velocities", default="upstream", ) parser.add_argument("--start_year", dest="start_year", type=int, help="Simulation start year", default=0) parser.add_argument("--end_year", dest="end_year", type=int, help="Simulation end year", default=10000) options = parser.parse_args() nn = options.n odir = options.odir oformat = options.oformat osize = options.osize queue = options.queue walltime = options.walltime system = options.system calving = options.calving climate = "elevation" exstep = options.exstep float_kill_calve_near_grounding_line = options.float_kill_calve_near_grounding_line forcing_type = 
options.forcing_type frontal_melt = options.frontal_melt grid = options.grid hydrology = options.hydrology ocean = "const" stress_balance = options.stress_balance vertical_velocity_approximation = options.vertical_velocity_approximation # Check which parameters are used for sensitivity study params_list = options.params_list do_T_max = False do_eigen_calving_k = False do_fice = False do_fsnow = False if params_list is not None: params = params_list.split(",") if "T_max" in params: do_T_max = True if "eigen_calving_k" in params: do_eigen_calving_k = True if "fice" in params: do_fice = True if "fsnow" in params: do_fsnow = True domain = options.domain pism_exec = generate_domain(domain) pism_dataname = "pism_outletglacier_g{}m.nc".format(grid) pism_config = "init_config" pism_config_nc = ".".join([pism_config, "nc"]) pism_config_cdl = os.path.join("../config", ".".join([pism_config, "cdl"])) # Anaconda libssl problem on chinook if system in ("chinook"): ncgen = "/usr/bin/ncgen" else: ncgen = "ncgen" cmd = [ncgen, "-o", pism_config_nc, pism_config_cdl] sub.call(cmd) if not os.path.isdir(odir): os.mkdir(odir) state_dir = "state" scalar_dir = "scalar" spatial_dir = "spatial" for tsdir in (scalar_dir, spatial_dir, state_dir): if not os.path.isdir(os.path.join(odir, tsdir)): os.mkdir(os.path.join(odir, tsdir)) odir_tmp = "_".join([odir, "tmp"]) if not os.path.isdir(odir_tmp): os.mkdir(odir_tmp) # ######################################################## # set up model initialization # ######################################################## ssa_e = 1.0 ssa_n_values = [3.25] sia_e_values = [3] ppq_values = [0.6] tefo_values = [0.020] phi_min_values = [15.0] phi_max_values = [45.0] topg_min_values = [-700] topg_max_values = [1000] combinations = list(
ssa_n_values, ppq_values, tefo_values, phi_min_values, phi_max_values, topg_min_values, topg_max_values, ) ) tsstep = "yearly" scripts = [] scripts_post = [] simulation_start_year = options.start_year simulation_end_year = options.end_year for n, combination in enumerate(combinations): sia_e, ssa_n, ppq, tefo, phi_min, phi_max, topg_min, topg_max = combination ttphi = "{},{},{},{}".format(phi_min, phi_max, topg_min, topg_max) name_options = OrderedDict() name_options["sia_e"] = sia_e name_options["ssa_n"] = ssa_n name_options["ppq"] = ppq name_options["tefo"] = tefo # name_options['phi_min'] = phi_min # name_options['phi_max'] = phi_max # name_options['topg_min'] = topg_min # name_options['topg_max'] = topg_max name_options["calving"] = calving full_exp_name = "_".join(["_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())])]) full_outfile = "{domain}_g{grid}m_{experiment}.nc".format( domain=domain.lower(), grid=grid, experiment=full_exp_name ) experiment = "_".join( [ climate, "_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())]), "{}".format(simulation_start_year), "{}".format(simulation_end_year), ] ) # All runs in one script file for coarse grids that fit into max walltime script = "init_{}_g{}m_{}.sh".format(domain.lower(), grid, full_exp_name) scripts.append(script) script_post = "init_{}_g{}m_{}_post.sh".format(domain.lower(), grid, full_exp_name) scripts_post.append(script_post) for filename in script: try: os.remove(filename) except OSError: pass batch_header, batch_system = make_batch_header(system, nn, walltime, queue) with open(script, "w") as f: f.write(batch_header) outfile = "{domain}_g{grid}m_{experiment}.nc".format(domain=domain.lower(), grid=grid, experiment=experiment) prefix = generate_prefix_str(pism_exec) general_params_dict = OrderedDict() general_params_dict["bootstrap"] = "" general_params_dict["i"] = pism_dataname general_params_dict["ys"] = simulation_start_year general_params_dict["ye"] = simulation_end_year general_params_dict["o"] = os.path.join(odir, state_dir, outfile) general_params_dict["o_format"] = oformat general_params_dict["o_size"] = osize general_params_dict["config_override"] = pism_config_nc grid_params_dict = generate_grid_description(grid, domain) sb_params_dict = OrderedDict() sb_params_dict["sia_e"] = sia_e sb_params_dict["ssa_e"] = ssa_e sb_params_dict["ssa_n"] = ssa_n sb_params_dict["ssa_dirichlet_bc"] = "" sb_params_dict["pseudo_plastic_q"] = ppq sb_params_dict["till_effective_fraction_overburden"] = tefo sb_params_dict["topg_to_phi"] = ttphi sb_params_dict["vertical_velocity_approximation"] = vertical_velocity_approximation stress_balance_params_dict = generate_stress_balance(stress_balance, sb_params_dict) climate_params_dict = generate_climate( climate, climatic_mass_balance="-2.5,3,200,1500,2000", ice_surface_temp="-5,-20,0,2000" ) ocean_params_dict = generate_ocean(ocean) hydro_params_dict = generate_hydrology(hydrology) calving_params_dict = generate_calving(calving, ocean_kill_file=pism_dataname, thickness_calving_threshold=200) exvars = default_spatial_ts_vars() spatial_ts_dict = generate_spatial_ts(full_outfile, exvars, exstep, odir=odir_tmp, split=True) scalar_ts_dict = generate_scalar_ts( outfile, tsstep, start=simulation_start_year, end=simulation_end_year, odir=os.path.join(odir, scalar_dir) ) all_params_dict = merge_dicts( general_params_dict, grid_params_dict, stress_balance_params_dict, climate_params_dict, ocean_params_dict, hydro_params_dict, calving_params_dict, spatial_ts_dict, 
scalar_ts_dict, ) all_params = " ".join([" ".join(["-" + k, str(v)]) for k, v in list(all_params_dict.items())]) if system in ("debug"): cmd = " ".join( [ batch_system["mpido"], prefix, all_params, "2>&1 | tee {outdir}/job.${batch}".format(outdir=odir, batch=batch_system["job_id"]), ] ) else: cmd = " ".join( [ batch_system["mpido"], prefix, all_params, "> {outdir}/job.${batch} 2>&1".format(outdir=odir, batch=batch_system["job_id"]), ] ) f.write(cmd) f.write("\n") f.write("\n") f.write("{} {}\n".format(batch_system["submit"], script_post)) f.write("\n") post_header = make_batch_post_header(system) with open(script_post, "w") as f: f.write(post_header) extra_file = spatial_ts_dict["extra_file"] myfiles = " ".join( [ "{}_{}.000.nc".format(extra_file, k) for k in range(simulation_start_year + exstep, simulation_end_year, exstep) ] ) myoutfile = extra_file + ".nc" myoutfile = os.path.join(odir, spatial_dir, os.path.split(myoutfile)[-1]) cmd = " ".join(["ncrcat -O -6 -h", myfiles, myoutfile, "\n"]) f.write(cmd) scripts = uniquify_list(scripts) scripts_post = uniquify_list(scripts_post) print("\n".join([script for script in scripts])) print("\nwritten\n") print("\n".join([script for script in scripts_post])) print("\nwritten\n")
itertools.product( sia_e_values,
random_line_split
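The split in this sample falls inside the itertools.product(...) call that builds the parameter ensemble. With the singleton value lists used here the product yields exactly one combination; widening any list multiplies the run count. A self-contained illustration with the same variable names:

import itertools

sia_e_values, ssa_n_values = [3], [3.25]
ppq_values, tefo_values = [0.6], [0.020]

combinations = list(itertools.product(
    sia_e_values, ssa_n_values, ppq_values, tefo_values))
# One combination here; e.g. sia_e_values = [1, 3] would double the ensemble.
for sia_e, ssa_n, ppq, tefo in combinations:
    print(sia_e, ssa_n, ppq, tefo)  # -> 3 3.25 0.6 0.02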
polynom.py
""" Modular arithmetic """ from collections import defaultdict import numpy as np class ModInt: """ Integers of Z/pZ """ def __init__(self, a, n): self.v = a % n self.n = n def __eq__(a, b): if isinstance(b, ModInt): return not bool(a - b) else: return NotImplemented
def __bool__(self): return bool(self.v) def __add__(a, b): assert isinstance(b, ModInt) assert a.n == b.n return ModInt(a.v + b.v, a.n) def __radd__(a, b): assert isinstance(b, int) return ModInt(a.v + b, a.n) def __neg__(a): return ModInt(-a.v, a.n) def __sub__(a, b): return ModInt(a.v - b.v, a.n) def __mul__(a, b): if isinstance(b, int): return ModInt(b * a.v, a.n) elif isinstance(b, ModInt): assert a.n == b.n return ModInt(a.v * b.v, a.n) return NotImplemented def __rmul__(a, b): return a * b def __pow__(P, k): assert isinstance(k, int) V = 1 A = P while k: if k & 1: V *= A k >>= 1 if not k: break A *= A return V def inv(self): if self.v == 0: raise ZeroDivisionError return ModInt(ModInt._inv(self.v, self.n), self.n) @staticmethod def _inv(k, n): k %= n if k == 1: return k return (n - n // k) * ModInt._inv(n % k, n) % n def __truediv__(a, b): assert isinstance(b, ModInt) assert a.n == b.n return a * b.inv() def __rtruediv__(a, k): assert isinstance(k, int) return ModInt(k, a.n) / a @staticmethod def extended_euclid(a, b): """Extended Euclid algorithm Return ------ x : int y : int a * x + b * y = gcd(a, b) """ A, B = a, b sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1) xp, yp = 1, 0 x, y = 0, 1 while b: assert A * xp + B * yp == a assert A * x + B * y == b r = a // b a, b = b, a % b x, xp = xp - r * x, x y, yp = yp - r * y, y return sa * xp, sb * yp def __repr__(self): return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n) def __str__(self): return '%s' % self.v class Polynomial: """ Generic class for polynomials Works with int, float and ModInt """ def __len__(self): return len(self.C) def trim(C): i = len(C) - 1 while i >= 0 and not C[i]: i -= 1 return C[:i + 1] def __init__(self, C=None): if C is None: C = [] self.C = Polynomial.trim(C) @property def deg(self): return len(self.C) - 1 def prime(self): return Polynomial([i * self[i] for i in range(1, len(self))]) def eval(self, x): if not self: return 0 v = self[-1] for c in self[-2::-1]: v = v * x + c return v def shift(self, d): return Polynomial( [0 * self[0]] * d + self.C if self else []) def __eq__(P, Q): return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q)) def __hash__(self): return hash(tuple(self.C)) def __call__(self, x): return Polynomial.eval(self, x) def __getitem__(self, x): return self.C[x] def __neg__(P): return Polynomial([-c for c in P.C]) def __add__(P, Q): if len(P.C) < len(Q.C): P, Q = Q, P return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):]) def __sub__(P, Q): return P + (-Q) def _mulpoly(P, Q): assert isinstance(Q, Polynomial) return Polynomial([sum(P[k] * Q[d - k] for k in range(max(0, d + 1 - len(Q)), min(d + 1, len(P))) ) for d in range(len(P) + len(Q) - 1)]) def _mulscal(P, k): return Polynomial([k * c for c in P]) def __mul__(P, Q): if isinstance(Q, Polynomial): return P._mulpoly(Q) return P._mulscal(Q) def __rmul__(P, Q): return P * Q def __pow__(P, k): assert isinstance(k, int) V = 1 A = P while k: if k & 1: V *= A k >>= 1 if not k: break A *= A return V def __iter__(self): yield from self.C def euclidean_division(A, B): Q = [0 * B[0]] * max(0, len(A) - len(B) + 1) while len(A.C) >= len(B.C): Q[len(A.C) - len(B.C)] = A[-1] / B[-1] A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1]) return Polynomial(Q), A def __floordiv__(A, B): assert isinstance(B, Polynomial) return A.euclidean_division(B)[0] def __mod__(A, B): """ Polynomial euclidean division or modular reduction """ if isinstance(B, Polynomial): return A.euclidean_division(B)[1] else: assert isinstance(B, int)
assert all(isinstance(c, int) for c in A) return A.reduceP(B) def __lt__(A, B): return A.deg < B.deg def __bool__(self): return bool(self.C) def gcd(A, B): while B: A, B = B, A % B return A * (1 / A[-1]) @staticmethod def gaussianElimKer(M, zero, one): """ Outputs an element of the kernel of M zero and one are elements of the same field """ # V satisfies the invariant # M = V M_0 V = [Polynomial([zero] * i + [one]) for i in range(len(M))] pivots = [None] * (len(M) + 1) for l in range(len(M)): while M[l].deg >= 0: idp = M[l].deg if pivots[idp] is None: pivots[idp] = l break else: c = M[l][idp] / M[pivots[idp]][idp] M[l] -= c * M[pivots[idp]] V[l] -= c * V[pivots[idp]] else: # If a line is null, we found an element of the kernel return V[l] return None def computeQ(P): # only for Z/pZ[X] square-free polynomials, for p prime p = P[0].n # We ignore the image of 1 because (F-Id)(1) = 0 M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P for i in range(1, P.deg)] # M -= Id for i in range(1, P.deg): M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)]) # We find an element of the kernel by Gaussian elimination pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p)) # We put back the 1 that was removed return pQ.shift(1) if pQ is not None else None def factor_unit(P): """ Berlekamp's algorithm only in Z/pZ """ assert all(isinstance(c, ModInt) for c in P) assert len(set(c.n for c in P)) == 1 if P.deg == 1: return defaultdict(int, {P: 1}) p = P[0].n S = Polynomial.gcd(P, P.prime()) if S.deg == P.deg: # P' = 0 so P = R^p R = Polynomial(P.C[::p]) return defaultdict(int, {D: p * v for D, v in Polynomial.factor_unit(R).items()}) else: factors = defaultdict(int) if S.deg: for D, v in S.factor_unit().items(): factors[D] += v P //= S # P is now square-free # We look for Q in Ker(F-Id) \ {1} Q = Polynomial.computeQ(P) if Q is None: # P is irreducible factors[P] += 1 else: # P is the product of the gcd(P, Q-i) # that are factored recursively for i in range(p): D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)])) if D.deg: for DD, v in D.factor_unit().items(): factors[DD] += v return factors def factor(P): """ Factorization of P only in Z/pZ """ cd = P[-1] if P.deg == 0: return (cd, defaultdict(int)) P = P * (1 / cd) return (cd, P.factor_unit()) @staticmethod def ppfactors(fz): c, Ds = fz a = str(c) if not Ds or c * c != c else '' l = [a] + [(str(D) if D.deg == 1 and not D[0] else ('(%s)' % D)) + (v > 1) * ('^%s' % v) for D, v in sorted(Ds.items(), key=lambda e: (e[0].deg, e[1]))] return '⋅'.join(i for i in l if i) def reduceP(P, p): return Polynomial([ModInt(c, p) for c in P]) @staticmethod def sign_changes(l): return sum(a * b < 0 for a, b in zip(l, l[1:])) def isreal(P): return not any(isinstance(c, ModInt) for c in P) def isinteger(P): return all(isinstance(c, int) for c in P) def sturm(P): """ Number of distinct real roots by Sturm's theorem.
Only works on int or float coefficients """ inf = float('inf') assert P.isreal() A = P B = A.prime() l1 = [A(-inf)] l2 = [A(inf)] while B: l1.append(B(-inf)) l2.append(B(inf)) B, A = -A % B, B return Polynomial.sign_changes(l1) - Polynomial.sign_changes(l2) @property def r1(P): """ Number of real roots with multiplicity """ assert P.isreal() ans = 0 s = P.sturm() while s: ans += s P = P.gcd(P.prime()) s = P.sturm() return ans @property def r2(P): ans = P.deg - P.r1 assert ans % 2 == 0 return ans // 2 def sylvester(P, Q): """ Sylvester's matrix """ assert P.isreal() assert Q.isreal() p = P.deg q = Q.deg P = np.array(P) Q = np.array(Q) m = np.zeros((p + q, p + q)) for i in range(q): m[i][i:i + p + 1] = P for i in range(p): m[q + i][i:i + q + 1] = Q return m def resultant(P, Q): """ Resultant of two real polynomials """ return np.linalg.det(P.sylvester(Q)) @property def disc(P): """ Discriminant of a real polynomial """ ans = P.resultant(P.prime()) / P[-1] if P.isinteger(): ans = int(ans.round()) if P.deg % 4 in [0, 1]: return ans else: return -ans def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.C) @staticmethod def _formatmonomial(c, d): assert c a = b = '' if c * c != c or not d: a = str(c) + (d != 0) * '⋅' if d > 1: b = 'X^' + str(d) elif d == 1: b = 'X' return a + b def __str__(self): if not self.C: return "0" ans = '+'.join(self._formatmonomial(c, d) for (d, c) in reversed(list(enumerate(self))) if c) return ans.replace("+-", "-").replace('-1⋅', '-')
def __hash__(self): return hash((self.v, self.n))
random_line_split
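ModInt.inv in the sample relies on the recursion inv(k) = (n - n // k) * inv(n % k) mod n, which is valid when the modulus n is prime, and extended_euclid returns Bezout coefficients x, y with a*x + b*y = gcd(a, b). A quick sanity check of both, assuming the file is importable as polynom:

from polynom import ModInt

a = ModInt(3, 7)
assert (a * a.inv()).v == 1           # 3 * 5 == 15 == 1 (mod 7)
x, y = ModInt.extended_euclid(3, 7)   # Bezout coefficients for gcd(3, 7) == 1
assert (x, y) == (-2, 1) and 3 * x + 7 * y == 1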
polynom.py
""" Modular arithmetic """ from collections import defaultdict import numpy as np class ModInt: """ Integers of Z/pZ """ def __init__(self, a, n): self.v = a % n self.n = n def __eq__(a, b): if isinstance(b, ModInt): return not bool(a - b) else: return NotImplemented def __hash__(self): return hash((self.v, self.n)) def __bool__(self): return bool(self.v) def __add__(a, b): assert isinstance(b, ModInt) assert a.n == b.n return ModInt(a.v + b.v, a.n) def __radd__(a, b): assert isinstance(b, int) return ModInt(a.v + b, a.n) def __neg__(a): return ModInt(-a.v, a.n) def __sub__(a, b): return ModInt(a.v - b.v, a.n) def __mul__(a, b): if isinstance(b, int): return ModInt(b * a.v, a.n) elif isinstance(b, ModInt): assert a.n == b.n return ModInt(a.v * b.v, a.n) return NotImplemented def __rmul__(a, b): return a * b def __pow__(P, k): assert isinstance(k, int) V = 1 A = P while k: if k & 1: V *= A k >>= 1 if not k: break A *= A return V def inv(self): if self.v == 0: raise ZeroDivisionError return ModInt(ModInt._inv(self.v, self.n), self.n) @staticmethod def _inv(k, n): k %= n if k == 1: return k return (n - n // k) * ModInt._inv(n % k, n) % n def __truediv__(a, b): assert isinstance(b, ModInt) assert a.n == b.n return a * b.inv() def __rtruediv__(a, k): assert isinstance(k, int) return ModInt(k, a.n) / a @staticmethod def extended_euclid(a, b): """Extended Euclid algorithm Return ------ x : int y : int a * x + b * y = gcd(a, b) """ A, B = a, b sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1) xp, yp = 1, 0 x, y = 0, 1 while b: assert A * xp + B * yp == a assert A * x + B * y == b r = a // b a, b = b, a % b x, xp = xp - r * x, x y, yp = yp - r * y, y return sa * xp, sb * yp def __repr__(self): return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n) def __str__(self): return '%s' % self.v class Polynomial: """ Generic class for polynomials Works with int, float and ModInt """ def __len__(self): return len(self.C) def trim(C): i = len(C) - 1 while i >= 0 and not C[i]: i -= 1 return C[:i + 1] def __init__(self, C=None): if C is None: C = [] self.C = Polynomial.trim(C) @property def deg(self): return len(self.C) - 1 def prime(self): return Polynomial([i * self[i] for i in range(1, len(self))]) def eval(self, x): if not self: return 0 v = self[-1] for c in self[-2::-1]: v = v * x + c return v def shift(self, d): return Polynomial( [0 * self[0]] * d + self.C if self else []) def __eq__(P, Q): return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q)) def __hash__(self): return hash(tuple(self.C)) def __call__(self, x): return Polynomial.eval(self, x) def __getitem__(self, x): return self.C[x] def __neg__(P): return Polynomial([-c for c in P.C]) def __add__(P, Q): if len(P.C) < len(Q.C): P, Q = Q, P return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):]) def __sub__(P, Q): return P + (-Q) def _mulpoly(P, Q): assert isinstance(Q, Polynomial) return Polynomial([sum(P[k] * Q[d - k] for k in range(max(0, d + 1 - len(Q)), min(d + 1, len(P))) ) for d in range(len(P) + len(Q) - 1)]) def _mulscal(P, k): return Polynomial([k * c for c in P]) def __mul__(P, Q): if isinstance(Q, Polynomial): return P._mulpoly(Q) return P._mulscal(Q) def __rmul__(P, Q): return P * Q def __pow__(P, k): assert isinstance(k, int) V = 1 A = P while k: if k & 1: V *= A k >>= 1 if not k: break A *= A return V def __iter__(self): yield from self.C def euclidean_division(A, B): Q = [0 * B[0]] * max(0, len(A) - len(B) + 1) while len(A.C) >= len(B.C): Q[len(A.C) - len(B.C)] = A[-1] / B[-1] A -= B.shift(len(A) - 
len(B)) * (A[-1] / B[-1]) return Polynomial(Q), A def __floordiv__(A, B): assert isinstance(B, Polynomial) return A.euclidean_division(B)[0] def __mod__(A, B): """ Polynomial euclidean division or modular reduction """ if isinstance(B, Polynomial): return A.euclidean_division(B)[1] else: assert isinstance(B, int) assert all(isinstance(c, int) for c in A) return A.reduceP(B) def __lt__(A, B): return A.deg < B.deg def __bool__(self): return bool(self.C) def gcd(A, B): while B: A, B = B, A % B return A * (1 / A[-1]) @staticmethod def gaussianElimKer(M, zero, one): """ Outputs an element of the kernel of M zero and one are elements of the same field """ # V satisfies the invariant # M = V M_0 V = [Polynomial([zero] * i + [one]) for i in range(len(M))] pivots = [None] * (len(M) + 1) for l in range(len(M)): while M[l].deg >= 0: idp = M[l].deg if pivots[idp] is None: pivots[idp] = l break else: c = M[l][idp] / M[pivots[idp]][idp] M[l] -= c * M[pivots[idp]] V[l] -= c * V[pivots[idp]] else: # If a line is null, we found an element of the kernel return V[l] return None def computeQ(P): # only for Z/pZ[X] square-free polynomials, for p prime p = P[0].n # We ignore the image of 1 because (F-Id)(1) = 0 M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P for i in range(1, P.deg)] # M -= Id for i in range(1, P.deg): M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)]) # We find an element of the kernel by Gaussian elimination pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p)) # We put back the 1 that was removed return pQ.shift(1) if pQ is not None else None def factor_unit(P): """ Berlekamp's algorithm only in Z/pZ """ assert all(isinstance(c, ModInt) for c in P) assert len(set(c.n for c in P)) == 1 if P.deg == 1: return defaultdict(int, {P: 1}) p = P[0].n S = Polynomial.gcd(P, P.prime()) if S.deg == P.deg: # P' = 0 so P = R^p R = Polynomial(P.C[::p]) return defaultdict(int, {D: p * v for D, v in Polynomial.factor_unit(R).items()}) else: factors = defaultdict(int) if S.deg: for D, v in S.factor_unit().items(): factors[D] += v P //= S # P is now square-free # We look for Q in Ker(F-Id) \ {1} Q = Polynomial.computeQ(P) if Q is None: # P is irreducible factors[P] += 1 else: # P is the product of the gcd(P, Q-i) # that are factored recursively for i in range(p): D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)])) if D.deg: for DD, v in D.factor_unit().items(): factors[DD] += v return factors def factor(P): """ Factorization of P only in Z/pZ """ cd = P[-1] if P.deg == 0: return (cd, defaultdict(int)) P = P * (1 / cd) return (cd, P.factor_unit()) @staticmethod def ppfactors(fz): c, Ds = fz a = str(c) if not Ds or c * c != c else '' l = [a] + [(str(D) if D.deg == 1 and not D[0] else ('(%s)' % D)) + (v > 1) * ('^%s' % v) for D, v in sorted(Ds.items(), key=lambda e: (e[0].deg, e[1]))] return '⋅'.join(i for i in l if i) def reduceP(P, p): re
@staticmethod def sign_changes(l): return sum(a * b < 0 for a, b in zip(l, l[1:])) def isreal(P): return not any(isinstance(c, ModInt) for c in P) def isinteger(P): return all(isinstance(c, int) for c in P) def sturm(P): """ Number of distinct real roots by Sturm's theorem. Only works on int or float coefficients """ inf = float('inf') assert P.isreal() A = P B = A.prime() l1 = [A(-inf)] l2 = [A(inf)] while B: l1.append(B(-inf)) l2.append(B(inf)) B, A = -A % B, B return Polynomial.sign_changes(l1) - Polynomial.sign_changes(l2) @property def r1(P): """ Number of real roots with multiplicity """ assert P.isreal() ans = 0 s = P.sturm() while s: ans += s P = P.gcd(P.prime()) s = P.sturm() return ans @property def r2(P): ans = P.deg - P.r1 assert ans % 2 == 0 return ans // 2 def sylvester(P, Q): """ Sylvester's matrix """ assert P.isreal() assert Q.isreal() p = P.deg q = Q.deg P = np.array(P) Q = np.array(Q) m = np.zeros((p + q, p + q)) for i in range(q): m[i][i:i + p + 1] = P for i in range(p): m[q + i][i:i + q + 1] = Q return m def resultant(P, Q): """ Resultant of two real polynomials """ return np.linalg.det(P.sylvester(Q)) @property def disc(P): """ Discriminant of a real polynomial """ ans = P.resultant(P.prime()) / P[-1] if P.isinteger(): ans = int(ans.round()) if P.deg % 4 in [0, 1]: return ans else: return -ans def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.C) @staticmethod def _formatmonomial(c, d): assert c a = b = '' if c * c != c or not d: a = str(c) + (d != 0) * '⋅' if d > 1: b = 'X^' + str(d) elif d == 1: b = 'X' return a + b def __str__(self): if not self.C: return "0" ans = '+'.join(self._formatmonomial(c, d) for (d, c) in reversed(list(enumerate(self))) if c) return ans.replace("+-", "-").replace('-1⋅', '-')
turn Polynomial([ModInt(c, p) for c in P])
identifier_body
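Tying the pieces of this last sample together: reduceP (whose body is the masked identifier_body above) maps integer coefficients into Z/pZ, factor runs Berlekamp's algorithm, and ppfactors pretty-prints the result. A short end-to-end sketch, again assuming the file is importable as polynom; coefficients are listed lowest degree first:

from polynom import Polynomial

P = Polynomial([1, 0, 1]) % 5    # X^2 + 1 reduced into Z/5Z[X] via reduceP
lead, factors = P.factor()       # Berlekamp factorization over Z/5Z
print(Polynomial.ppfactors((lead, factors)))
# -> (X+3)⋅(X+2), i.e. (X-2)(X-3): 2 and 3 are the square roots of -1 mod 5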