Dataset schema (per-column type and observed range):

| Column | Type          | Range          |
|--------|---------------|----------------|
| patch  | stringlengths | 17 to 31.2k    |
| y      | int64         | 1 to 1         |
| oldf   | stringlengths | 0 to 2.21M     |
| idx    | int64         | 1 to 1         |
| id     | int64         | 4.29k to 68.4k |
| msg    | stringlengths | 8 to 843       |
| proj   | stringclasses | 212 values     |
| lang   | stringclasses | 9 values       |
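Rows with this schema can be loaded and inspected with the Hugging Face `datasets` library. A minimal sketch, assuming the records have been exported locally as a hypothetical `commit_records.jsonl` file:

```python
from datasets import load_dataset

# Hypothetical local export; substitute the actual Hub repo or file path.
ds = load_dataset("json", data_files="commit_records.jsonl", split="train")

# Each record carries a unified diff (patch), the pre-change file (oldf),
# a review/commit message (msg), and project/language metadata.
record = ds[0]
print(record["proj"], record["lang"], record["id"])
print(record["msg"])
```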
patch:

```diff
@@ -60,6 +60,11 @@ module Bolt
       print_table('tasks' => tasks, 'modulepath' => modulepath)
     end

+    def print_plugin_list(plugins, modulepath)
+      plugins.delete(:validate_resolve_reference)
+      print_table('plugins' => plugins, 'modulepath' => modulepath)
+    end
+
     def print_plan_info(plan)
       path = plan.delete('module')
       plan['module_dir'] = if path.start_with?(Bolt::Config::Modulepath::MODULES_PATH)
```
y: 1
oldf:

```ruby
# frozen_string_literal: true

module Bolt
  class Outputter
    class JSON < Bolt::Outputter
      def initialize(color, verbose, trace, spin, stream = $stdout)
        super
        @items_open = false
        @object_open = false
        @preceding_item = false
      end

      def print_head
        @stream.puts '{ "items": ['
        @preceding_item = false
        @items_open = true
        @object_open = true
      end

      def handle_event(event)
        case event[:type]
        when :node_result
          print_result(event[:result])
        when :message
          print_message(event[:message])
        end
      end

      def print_result(result)
        @stream.puts ',' if @preceding_item
        @stream.puts result.to_json
        @preceding_item = true
      end

      def print_summary(results, elapsed_time)
        @stream.puts "],\n"
        @preceding_item = false
        @items_open = false
        @stream.puts format('"target_count": %<size>d, "elapsed_time": %<elapsed>d }',
                            size: results.size,
                            elapsed: elapsed_time)
      end

      def print_table(results)
        @stream.puts results.to_json
      end
      alias print_module_list print_table

      def print_task_info(task)
        path = task.files.first['path'].chomp("/tasks/#{task.files.first['name']}")
        module_dir = if path.start_with?(Bolt::Config::Modulepath::MODULES_PATH)
                       "built-in module"
                     else
                       path
                     end
        @stream.puts task.to_h.merge(module_dir: module_dir).to_json
      end

      def print_tasks(tasks, modulepath)
        print_table('tasks' => tasks, 'modulepath' => modulepath)
      end

      def print_plan_info(plan)
        path = plan.delete('module')
        plan['module_dir'] = if path.start_with?(Bolt::Config::Modulepath::MODULES_PATH)
                               "built-in module"
                             else
                               path
                             end
        @stream.puts plan.to_json
      end

      def print_plans(plans, modulepath)
        print_table('plans' => plans, 'modulepath' => modulepath)
      end

      def print_apply_result(apply_result, _elapsed_time)
        @stream.puts apply_result.to_json
      end

      def print_plan_result(result)
        # Ruby JSON patches most objects to have a to_json method.
        @stream.puts result.to_json
      end

      def print_result_set(result_set)
        @stream.puts result_set.to_json
      end

      def print_topics(topics)
        print_table('topics' => topics)
      end

      def print_guide(guide, topic)
        @stream.puts({ 'topic' => topic, 'guide' => guide }.to_json)
      end

      def print_puppetfile_result(success, puppetfile, moduledir)
        @stream.puts({ success: success,
                       puppetfile: puppetfile,
                       moduledir: moduledir.to_s }.to_json)
      end

      def print_targets(target_list, inventory_source, default_inventory, _target_flag)
        @stream.puts ::JSON.pretty_generate(
          inventory: {
            targets: target_list[:inventory].map(&:name),
            count: target_list[:inventory].count,
            file: (inventory_source || default_inventory).to_s
          },
          adhoc: {
            targets: target_list[:adhoc].map(&:name),
            count: target_list[:adhoc].count
          },
          targets: target_list.values.flatten.map(&:name),
          count: target_list.values.flatten.count
        )
      end

      def print_target_info(target_list, _inventory_source, _default_inventory, _target_flag)
        targets = target_list.values.flatten
        @stream.puts ::JSON.pretty_generate(
          targets: targets.map(&:detail),
          count: targets.count
        )
      end

      def print_groups(groups, _inventory_source, _default_inventory)
        count = groups.count
        @stream.puts({ groups: groups,
                       count: count }.to_json)
      end

      def fatal_error(err)
        @stream.puts "],\n" if @items_open
        @stream.puts '"_error": ' if @object_open
        err_obj = err.to_h
        if @trace && err.backtrace
          err_obj[:details] ||= {}
          err_obj[:details][:backtrace] = err.backtrace
        end
        @stream.puts err_obj.to_json
        @stream.puts '}' if @object_open
      end

      def print_message(message)
        $stderr.puts(message)
      end
      alias print_error print_message

      def print_action_step(step)
        $stderr.puts(step)
      end
      alias print_action_error print_action_step
    end
  end
end
```
idx: 1
id: 18,547
msg: I think this should also filter out `validate_resolve_reference` hooks.
proj: puppetlabs-bolt
lang: rb
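Each record pairs the pre-change file (oldf) with a unified diff (patch), so the post-change file can be recovered by applying the hunk. A minimal sketch, assuming GNU `patch` is on the PATH; `apply_patch` is a hypothetical helper, not part of the dataset:

```python
import pathlib
import subprocess
import tempfile

def apply_patch(oldf: str, patch: str) -> str:
    """Hypothetical helper: apply a unified diff to pre-change file text."""
    with tempfile.TemporaryDirectory() as tmp:
        src = pathlib.Path(tmp) / "file"
        src.write_text(oldf, encoding="utf-8")
        # GNU patch reads the diff from stdin; "-o -" writes the result to stdout.
        result = subprocess.run(
            ["patch", "-u", str(src), "-o", "-"],
            input=patch,
            capture_output=True,
            text=True,
            check=True,
        )
        return result.stdout

# e.g. newf = apply_patch(record["oldf"], record["patch"])
```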
patch:

```diff
@@ -18,8 +18,8 @@ using System;
 using System.Diagnostics;
 using System.Security;
 using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation;
-using OpenTelemetry.Trace;
 using OpenTelemetry.Metrics;
+using OpenTelemetry.Trace;

 namespace OpenTelemetry.Exporter
 {
```
y: 1
oldf:

```csharp
// <copyright file="OtlpExporterOptions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>

using System;
using System.Diagnostics;
using System.Security;
using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation;
using OpenTelemetry.Trace;
using OpenTelemetry.Metrics;

namespace OpenTelemetry.Exporter
{
    /// <summary>
    /// Configuration options for the OpenTelemetry Protocol (OTLP) exporter.
    /// </summary>
    public class OtlpExporterOptions
    {
        internal const string EndpointEnvVarName = "OTEL_EXPORTER_OTLP_ENDPOINT";
        internal const string HeadersEnvVarName = "OTEL_EXPORTER_OTLP_HEADERS";
        internal const string TimeoutEnvVarName = "OTEL_EXPORTER_OTLP_TIMEOUT";

        /// <summary>
        /// Initializes a new instance of the <see cref="OtlpExporterOptions"/> class.
        /// </summary>
        public OtlpExporterOptions()
        {
            try
            {
                string endpointEnvVar = Environment.GetEnvironmentVariable(EndpointEnvVarName);

                if (!string.IsNullOrEmpty(endpointEnvVar))
                {
                    if (Uri.TryCreate(endpointEnvVar, UriKind.Absolute, out var endpoint))
                    {
                        this.Endpoint = endpoint;
                    }
                    else
                    {
                        OpenTelemetryProtocolExporterEventSource.Log.FailedToParseEnvironmentVariable(EndpointEnvVarName, endpointEnvVar);
                    }
                }

                string headersEnvVar = Environment.GetEnvironmentVariable(HeadersEnvVarName);

                if (!string.IsNullOrEmpty(headersEnvVar))
                {
                    this.Headers = headersEnvVar;
                }

                string timeoutEnvVar = Environment.GetEnvironmentVariable(TimeoutEnvVarName);

                if (!string.IsNullOrEmpty(timeoutEnvVar))
                {
                    if (int.TryParse(timeoutEnvVar, out var timeout))
                    {
                        this.TimeoutMilliseconds = timeout;
                    }
                    else
                    {
                        OpenTelemetryProtocolExporterEventSource.Log.FailedToParseEnvironmentVariable(TimeoutEnvVarName, timeoutEnvVar);
                    }
                }
            }
            catch (SecurityException ex)
            {
                // The caller does not have the required permission to
                // retrieve the value of an environment variable from the current process.
                OpenTelemetryProtocolExporterEventSource.Log.MissingPermissionsToReadEnvironmentVariable(ex);
            }
        }

        /// <summary>
        /// Gets or sets the target to which the exporter is going to send traces.
        /// Must be a valid Uri with scheme (http) and host, and
        /// may contain a port and path. Secure connection(https) is not
        /// supported.
        /// </summary>
        public Uri Endpoint { get; set; } = new Uri("http://localhost:4317");

        /// <summary>
        /// Gets or sets optional headers for the connection. Refer to the <a href="https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specifying-headers-via-environment-variables">
        /// specification</a> for information on the expected format for Headers.
        /// </summary>
        public string Headers { get; set; }

        /// <summary>
        /// Gets or sets the max waiting time (in milliseconds) for the backend to process each span batch. The default value is 10000.
        /// </summary>
        public int TimeoutMilliseconds { get; set; } = 10000;

        /// <summary>
        /// Gets or sets the export processor type to be used with the OpenTelemetry Protocol Exporter. The default value is <see cref="ExportProcessorType.Batch"/>.
        /// </summary>
        public ExportProcessorType ExportProcessorType { get; set; } = ExportProcessorType.Batch;

        /// <summary>
        /// Gets or sets the BatchExportProcessor options. Ignored unless ExportProcessorType is Batch.
        /// </summary>
        public BatchExportProcessorOptions<Activity> BatchExportProcessorOptions { get; set; } = new BatchExportActivityProcessorOptions();

        /// <summary>
        /// Gets or sets the metric export interval in milliseconds. The default value is 1000 milliseconds.
        /// </summary>
        public int MetricExportIntervalMilliseconds { get; set; } = 1000;

        /// <summary>
        /// Gets or sets the AggregationTemporality used for Histogram
        /// and Sum metrics.
        /// </summary>
        public AggregationTemporality AggregationTemporality { get; set; } = AggregationTemporality.Cumulative;
    }
}
```
idx: 1
id: 21,239
msg: Unrelated, but a required fix, as the metrics branch got broken by the latest main->metrics merge and conflict resolution.
proj: open-telemetry-opentelemetry-dotnet
lang: .cs
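Because proj and lang are stored per record, a language or project slice of the corpus is a one-line filter. A minimal sketch against the `ds` object loaded above:

```python
from collections import Counter

# Keep only the C# records, matching the lang value seen in this record.
csharp_records = ds.filter(lambda r: r["lang"] == ".cs")

# A quick per-project tally (reads the proj column into memory).
per_project = Counter(ds["proj"])
print(per_project.most_common(5))
```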
patch:

```diff
@@ -1,9 +1,8 @@
 # -*- coding: UTF-8 -*-
-#globalCommands.py
 #A part of NonVisual Desktop Access (NVDA)
 #This file is covered by the GNU General Public License.
 #See the file COPYING for more details.
-#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Rui Batista, Joseph Lee, Leonard de Ruijter, Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger, Łukasz Golonka
+# Copyright (C) 2006-2019 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Rui Batista, Joseph Lee, Leonard de Ruijter, Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger, Łukasz Golonka, Jakub Lukowicz

 import time
 import itertools
```
y: 1

oldf:
# -*- coding: UTF-8 -*- #globalCommands.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. #Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Rui Batista, Joseph Lee, Leonard de Ruijter, Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger, Łukasz Golonka import time import itertools import tones import audioDucking import touchHandler import keyboardHandler import mouseHandler import eventHandler import review import controlTypes import api import textInfos import speech import sayAllHandler from NVDAObjects import NVDAObject, NVDAObjectTextInfo import globalVars from logHandler import log from synthDriverHandler import * import gui import wx import config import winUser import appModuleHandler import winKernel import treeInterceptorHandler import browseMode import scriptHandler from scriptHandler import script import ui import braille import brailleInput import inputCore import virtualBuffers import characterProcessing from baseObject import ScriptableObject import core import winVersion from base64 import b16encode import vision #: Script category for text review commands. # Translators: The name of a category of NVDA commands. SCRCAT_TEXTREVIEW = _("Text review") #: Script category for Object navigation commands. # Translators: The name of a category of NVDA commands. SCRCAT_OBJECTNAVIGATION = _("Object navigation") #: Script category for system caret commands. # Translators: The name of a category of NVDA commands. SCRCAT_SYSTEMCARET = _("System caret") #: Script category for mouse commands. # Translators: The name of a category of NVDA commands. SCRCAT_MOUSE = _("Mouse") #: Script category for speech commands. # Translators: The name of a category of NVDA commands. SCRCAT_SPEECH = _("Speech") #: Script category for configuration dialogs commands. # Translators: The name of a category of NVDA commands. SCRCAT_CONFIG = _("Configuration") #: Script category for configuration profile activation and management commands. # Translators: The name of a category of NVDA commands. SCRCAT_CONFIG_PROFILES = _("Configuration profiles") #: Script category for Braille commands. # Translators: The name of a category of NVDA commands. SCRCAT_BRAILLE = _("Braille") #: Script category for Vision commands. # Translators: The name of a category of NVDA commands. SCRCAT_VISION = _("Vision") #: Script category for tools commands. # Translators: The name of a category of NVDA commands. SCRCAT_TOOLS = pgettext('script category', 'Tools') #: Script category for touch commands. # Translators: The name of a category of NVDA commands. SCRCAT_TOUCH = _("Touch screen") #: Script category for focus commands. # Translators: The name of a category of NVDA commands. SCRCAT_FOCUS = _("System focus") #: Script category for system status commands. # Translators: The name of a category of NVDA commands. SCRCAT_SYSTEM = _("System status") #: Script category for input commands. # Translators: The name of a category of NVDA commands. SCRCAT_INPUT = _("Input") #: Script category for document formatting commands. # Translators: The name of a category of NVDA commands. SCRCAT_DOCUMENTFORMATTING = _("Document formatting") class GlobalCommands(ScriptableObject): """Commands that are available at all times, regardless of the current focus. 
""" def script_cycleAudioDuckingMode(self,gesture): if not audioDucking.isAudioDuckingSupported(): # Translators: a message when audio ducking is not supported on this machine ui.message(_("Audio ducking not supported")) return curMode=config.conf['audio']['audioDuckingMode'] numModes=len(audioDucking.audioDuckingModes) nextMode=(curMode+1)%numModes audioDucking.setAudioDuckingMode(nextMode) config.conf['audio']['audioDuckingMode']=nextMode nextLabel=audioDucking.audioDuckingModes[nextMode] ui.message(nextLabel) # Translators: Describes the Cycle audio ducking mode command. script_cycleAudioDuckingMode.__doc__=_("Cycles through audio ducking modes which determine when NVDA lowers the volume of other sounds") def script_toggleInputHelp(self,gesture): inputCore.manager.isInputHelpActive = not inputCore.manager.isInputHelpActive # Translators: This will be presented when the input help is toggled. stateOn = _("input help on") # Translators: This will be presented when the input help is toggled. stateOff = _("input help off") state = stateOn if inputCore.manager.isInputHelpActive else stateOff ui.message(state) # Translators: Input help mode message for toggle input help command. script_toggleInputHelp.__doc__=_("Turns input help on or off. When on, any input such as pressing a key on the keyboard will tell you what script is associated with that input, if any.") script_toggleInputHelp.category=SCRCAT_INPUT def script_toggleCurrentAppSleepMode(self,gesture): curFocus=api.getFocusObject() curApp=curFocus.appModule if curApp.sleepMode: curApp.sleepMode=False # Translators: This is presented when sleep mode is deactivated, NVDA will continue working as expected. ui.message(_("Sleep mode off")) eventHandler.executeEvent("gainFocus",curFocus) else: eventHandler.executeEvent("loseFocus",curFocus) curApp.sleepMode=True # Translators: This is presented when sleep mode is activated, the focused application is self voicing, such as klango or openbook. ui.message(_("Sleep mode on")) # Translators: Input help mode message for toggle sleep mode command. script_toggleCurrentAppSleepMode.__doc__=_("Toggles sleep mode on and off for the active application.") script_toggleCurrentAppSleepMode.allowInSleepMode=True def script_reportCurrentLine(self,gesture): obj=api.getFocusObject() treeInterceptor=obj.treeInterceptor if isinstance(treeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor) and not treeInterceptor.passThrough: obj=treeInterceptor try: info=obj.makeTextInfo(textInfos.POSITION_CARET) except (NotImplementedError, RuntimeError): info=obj.makeTextInfo(textInfos.POSITION_FIRST) info.expand(textInfos.UNIT_LINE) scriptCount=scriptHandler.getLastScriptRepeatCount() if scriptCount==0: speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET) else: speech.spellTextInfo(info,useCharacterDescriptions=scriptCount>1) # Translators: Input help mode message for report current line command. script_reportCurrentLine.__doc__=_("Reports the current line under the application cursor. Pressing this key twice will spell the current line. Pressing three times will spell the line using character descriptions.") script_reportCurrentLine.category=SCRCAT_SYSTEMCARET def script_leftMouseClick(self,gesture): # Translators: Reported when left mouse button is clicked. ui.message(_("Left click")) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_LEFTDOWN,0,0) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_LEFTUP,0,0) # Translators: Input help mode message for left mouse click command. 
script_leftMouseClick.__doc__=_("Clicks the left mouse button once at the current mouse position") script_leftMouseClick.category=SCRCAT_MOUSE def script_rightMouseClick(self,gesture): # Translators: Reported when right mouse button is clicked. ui.message(_("Right click")) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_RIGHTDOWN,0,0) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_RIGHTUP,0,0) # Translators: Input help mode message for right mouse click command. script_rightMouseClick.__doc__=_("Clicks the right mouse button once at the current mouse position") script_rightMouseClick.category=SCRCAT_MOUSE def script_toggleLeftMouseButton(self,gesture): if winUser.getKeyState(winUser.VK_LBUTTON)&32768: # Translators: This is presented when the left mouse button lock is released (used for drag and drop). ui.message(_("Left mouse button unlock")) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_LEFTUP,0,0) else: # Translators: This is presented when the left mouse button is locked down (used for drag and drop). ui.message(_("Left mouse button lock")) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_LEFTDOWN,0,0) # Translators: Input help mode message for left mouse lock/unlock toggle command. script_toggleLeftMouseButton.__doc__=_("Locks or unlocks the left mouse button") script_toggleLeftMouseButton.category=SCRCAT_MOUSE def script_toggleRightMouseButton(self,gesture): if winUser.getKeyState(winUser.VK_RBUTTON)&32768: # Translators: This is presented when the right mouse button lock is released (used for drag and drop). ui.message(_("Right mouse button unlock")) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_RIGHTUP,0,0) else: # Translators: This is presented when the right mouse button is locked down (used for drag and drop). ui.message(_("Right mouse button lock")) mouseHandler.executeMouseEvent(winUser.MOUSEEVENTF_RIGHTDOWN,0,0) # Translators: Input help mode message for right mouse lock/unlock command. script_toggleRightMouseButton.__doc__=_("Locks or unlocks the right mouse button") script_toggleRightMouseButton.category=SCRCAT_MOUSE def script_reportCurrentSelection(self,gesture): obj=api.getFocusObject() treeInterceptor=obj.treeInterceptor if isinstance(treeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor) and not treeInterceptor.passThrough: obj=treeInterceptor try: info=obj.makeTextInfo(textInfos.POSITION_SELECTION) except (RuntimeError, NotImplementedError): info=None if not info or info.isCollapsed: speech.speakMessage(_("No selection")) else: speech.speakTextSelected(info.text) # Translators: Input help mode message for report current selection command. script_reportCurrentSelection.__doc__=_("Announces the current selection in edit controls and documents. If there is no selection it says so.") script_reportCurrentSelection.category=SCRCAT_SYSTEMCARET def script_dateTime(self,gesture): if scriptHandler.getLastScriptRepeatCount()==0: text=winKernel.GetTimeFormatEx(winKernel.LOCALE_NAME_USER_DEFAULT, winKernel.TIME_NOSECONDS, None, None) else: text=winKernel.GetDateFormatEx(winKernel.LOCALE_NAME_USER_DEFAULT, winKernel.DATE_LONGDATE, None, None) ui.message(text) # Translators: Input help mode message for report date and time command. script_dateTime.__doc__=_("If pressed once, reports the current time. 
If pressed twice, reports the current date") script_dateTime.category=SCRCAT_SYSTEM def script_increaseSynthSetting(self,gesture): settingName=globalVars.settingsRing.currentSettingName if not settingName: # Translators: Reported when there are no settings to configure in synth settings ring (example: when there is no setting for language). ui.message(_("No settings")) return settingValue=globalVars.settingsRing.increase() ui.message("%s %s" % (settingName,settingValue)) # Translators: Input help mode message for increase synth setting value command. script_increaseSynthSetting.__doc__=_("Increases the currently active setting in the synth settings ring") script_increaseSynthSetting.category=SCRCAT_SPEECH def script_decreaseSynthSetting(self,gesture): settingName=globalVars.settingsRing.currentSettingName if not settingName: ui.message(_("No settings")) return settingValue=globalVars.settingsRing.decrease() ui.message("%s %s" % (settingName,settingValue)) # Translators: Input help mode message for decrease synth setting value command. script_decreaseSynthSetting.__doc__=_("Decreases the currently active setting in the synth settings ring") script_decreaseSynthSetting.category=SCRCAT_SPEECH def script_nextSynthSetting(self,gesture): nextSettingName=globalVars.settingsRing.next() if not nextSettingName: ui.message(_("No settings")) return nextSettingValue=globalVars.settingsRing.currentSettingValue ui.message("%s %s"%(nextSettingName,nextSettingValue)) # Translators: Input help mode message for next synth setting command. script_nextSynthSetting.__doc__=_("Moves to the next available setting in the synth settings ring") script_nextSynthSetting.category=SCRCAT_SPEECH def script_previousSynthSetting(self,gesture): previousSettingName=globalVars.settingsRing.previous() if not previousSettingName: ui.message(_("No settings")) return previousSettingValue=globalVars.settingsRing.currentSettingValue ui.message("%s %s"%(previousSettingName,previousSettingValue)) # Translators: Input help mode message for previous synth setting command. script_previousSynthSetting.__doc__=_("Moves to the previous available setting in the synth settings ring") script_previousSynthSetting.category=SCRCAT_SPEECH def script_toggleSpeakTypedCharacters(self,gesture): if config.conf["keyboard"]["speakTypedCharacters"]: # Translators: The message announced when toggling the speak typed characters keyboard setting. state = _("speak typed characters off") config.conf["keyboard"]["speakTypedCharacters"]=False else: # Translators: The message announced when toggling the speak typed characters keyboard setting. state = _("speak typed characters on") config.conf["keyboard"]["speakTypedCharacters"]=True ui.message(state) # Translators: Input help mode message for toggle speaked typed characters command. script_toggleSpeakTypedCharacters.__doc__=_("Toggles on and off the speaking of typed characters") script_toggleSpeakTypedCharacters.category=SCRCAT_SPEECH def script_toggleSpeakTypedWords(self,gesture): if config.conf["keyboard"]["speakTypedWords"]: # Translators: The message announced when toggling the speak typed words keyboard setting. state = _("speak typed words off") config.conf["keyboard"]["speakTypedWords"]=False else: # Translators: The message announced when toggling the speak typed words keyboard setting. state = _("speak typed words on") config.conf["keyboard"]["speakTypedWords"]=True ui.message(state) # Translators: Input help mode message for toggle speak typed words command. 
script_toggleSpeakTypedWords.__doc__=_("Toggles on and off the speaking of typed words") script_toggleSpeakTypedWords.category=SCRCAT_SPEECH def script_toggleSpeakCommandKeys(self,gesture): if config.conf["keyboard"]["speakCommandKeys"]: # Translators: The message announced when toggling the speak typed command keyboard setting. state = _("speak command keys off") config.conf["keyboard"]["speakCommandKeys"]=False else: # Translators: The message announced when toggling the speak typed command keyboard setting. state = _("speak command keys on") config.conf["keyboard"]["speakCommandKeys"]=True ui.message(state) # Translators: Input help mode message for toggle speak command keys command. script_toggleSpeakCommandKeys.__doc__=_("Toggles on and off the speaking of typed keys, that are not specifically characters") script_toggleSpeakCommandKeys.category=SCRCAT_SPEECH def script_toggleReportFontName(self,gesture): if config.conf["documentFormatting"]["reportFontName"]: # Translators: The message announced when toggling the report font name document formatting setting. state = _("report font name off") config.conf["documentFormatting"]["reportFontName"]=False else: # Translators: The message announced when toggling the report font name document formatting setting. state = _("report font name on") config.conf["documentFormatting"]["reportFontName"]=True ui.message(state) # Translators: Input help mode message for toggle report font name command. script_toggleReportFontName.__doc__=_("Toggles on and off the reporting of font changes") script_toggleReportFontName.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportFontSize(self,gesture): if config.conf["documentFormatting"]["reportFontSize"]: # Translators: The message announced when toggling the report font size document formatting setting. state = _("report font size off") config.conf["documentFormatting"]["reportFontSize"]=False else: # Translators: The message announced when toggling the report font size document formatting setting. state = _("report font size on") config.conf["documentFormatting"]["reportFontSize"]=True ui.message(state) # Translators: Input help mode message for toggle report font size command. script_toggleReportFontSize.__doc__=_("Toggles on and off the reporting of font size changes") script_toggleReportFontSize.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportFontAttributes(self,gesture): if config.conf["documentFormatting"]["reportFontAttributes"]: # Translators: The message announced when toggling the report font attributes document formatting setting. state = _("report font attributes off") config.conf["documentFormatting"]["reportFontAttributes"]=False else: # Translators: The message announced when toggling the report font attributes document formatting setting. state = _("report font attributes on") config.conf["documentFormatting"]["reportFontAttributes"]=True ui.message(state) # Translators: Input help mode message for toggle report font attributes command. script_toggleReportFontAttributes.__doc__=_("Toggles on and off the reporting of font attributes") script_toggleReportFontAttributes.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportRevisions(self,gesture): if config.conf["documentFormatting"]["reportRevisions"]: # Translators: The message announced when toggling the report revisions document formatting setting. 
state = _("report revisions off") config.conf["documentFormatting"]["reportRevisions"]=False else: # Translators: The message announced when toggling the report revisions document formatting setting. state = _("report revisions on") config.conf["documentFormatting"]["reportRevisions"]=True ui.message(state) # Translators: Input help mode message for toggle report revisions command. script_toggleReportRevisions.__doc__=_("Toggles on and off the reporting of revisions") script_toggleReportRevisions.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportEmphasis(self,gesture): if config.conf["documentFormatting"]["reportEmphasis"]: # Translators: The message announced when toggling the report emphasis document formatting setting. state = _("report emphasis off") config.conf["documentFormatting"]["reportEmphasis"]=False else: # Translators: The message announced when toggling the report emphasis document formatting setting. state = _("report emphasis on") config.conf["documentFormatting"]["reportEmphasis"]=True ui.message(state) # Translators: Input help mode message for toggle report emphasis command. script_toggleReportEmphasis.__doc__=_("Toggles on and off the reporting of emphasis") script_toggleReportEmphasis.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportColor(self,gesture): if config.conf["documentFormatting"]["reportColor"]: # Translators: The message announced when toggling the report colors document formatting setting. state = _("report colors off") config.conf["documentFormatting"]["reportColor"]=False else: # Translators: The message announced when toggling the report colors document formatting setting. state = _("report colors on") config.conf["documentFormatting"]["reportColor"]=True ui.message(state) # Translators: Input help mode message for toggle report colors command. script_toggleReportColor.__doc__=_("Toggles on and off the reporting of colors") script_toggleReportColor.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportAlignment(self,gesture): if config.conf["documentFormatting"]["reportAlignment"]: # Translators: The message announced when toggling the report alignment document formatting setting. state = _("report alignment off") config.conf["documentFormatting"]["reportAlignment"]=False else: # Translators: The message announced when toggling the report alignment document formatting setting. state = _("report alignment on") config.conf["documentFormatting"]["reportAlignment"]=True ui.message(state) # Translators: Input help mode message for toggle report alignment command. script_toggleReportAlignment.__doc__=_("Toggles on and off the reporting of text alignment") script_toggleReportAlignment.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportStyle(self,gesture): if config.conf["documentFormatting"]["reportStyle"]: # Translators: The message announced when toggling the report style document formatting setting. state = _("report style off") config.conf["documentFormatting"]["reportStyle"]=False else: # Translators: The message announced when toggling the report style document formatting setting. state = _("report style on") config.conf["documentFormatting"]["reportStyle"]=True ui.message(state) # Translators: Input help mode message for toggle report style command. 
script_toggleReportStyle.__doc__=_("Toggles on and off the reporting of style changes") script_toggleReportStyle.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportSpellingErrors(self,gesture): if config.conf["documentFormatting"]["reportSpellingErrors"]: # Translators: The message announced when toggling the report spelling errors document formatting setting. state = _("report spelling errors off") config.conf["documentFormatting"]["reportSpellingErrors"]=False else: # Translators: The message announced when toggling the report spelling errors document formatting setting. state = _("report spelling errors on") config.conf["documentFormatting"]["reportSpellingErrors"]=True ui.message(state) # Translators: Input help mode message for toggle report spelling errors command. script_toggleReportSpellingErrors.__doc__=_("Toggles on and off the reporting of spelling errors") script_toggleReportSpellingErrors.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportPage(self,gesture): if config.conf["documentFormatting"]["reportPage"]: # Translators: The message announced when toggling the report pages document formatting setting. state = _("report pages off") config.conf["documentFormatting"]["reportPage"]=False else: # Translators: The message announced when toggling the report pages document formatting setting. state = _("report pages on") config.conf["documentFormatting"]["reportPage"]=True ui.message(state) # Translators: Input help mode message for toggle report pages command. script_toggleReportPage.__doc__=_("Toggles on and off the reporting of pages") script_toggleReportPage.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportLineNumber(self,gesture): if config.conf["documentFormatting"]["reportLineNumber"]: # Translators: The message announced when toggling the report line numbers document formatting setting. state = _("report line numbers off") config.conf["documentFormatting"]["reportLineNumber"]=False else: # Translators: The message announced when toggling the report line numbers document formatting setting. state = _("report line numbers on") config.conf["documentFormatting"]["reportLineNumber"]=True ui.message(state) # Translators: Input help mode message for toggle report line numbers command. script_toggleReportLineNumber.__doc__=_("Toggles on and off the reporting of line numbers") script_toggleReportLineNumber.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportLineIndentation(self,gesture): lineIndentationSpeech = config.conf["documentFormatting"]["reportLineIndentation"] lineIndentationTones = config.conf["documentFormatting"]["reportLineIndentationWithTones"] if not lineIndentationSpeech and not lineIndentationTones: # Translators: A message reported when cycling through line indentation settings. ui.message(_("Report line indentation with speech")) lineIndentationSpeech = True elif lineIndentationSpeech and not lineIndentationTones: # Translators: A message reported when cycling through line indentation settings. ui.message(_("Report line indentation with tones")) lineIndentationSpeech = False lineIndentationTones = True elif not lineIndentationSpeech and lineIndentationTones: # Translators: A message reported when cycling through line indentation settings. ui.message(_("Report line indentation with speech and tones")) lineIndentationSpeech = True else: # Translators: A message reported when cycling through line indentation settings. 
ui.message(_("Report line indentation off")) lineIndentationSpeech = False lineIndentationTones = False config.conf["documentFormatting"]["reportLineIndentation"] = lineIndentationSpeech config.conf["documentFormatting"]["reportLineIndentationWithTones"] = lineIndentationTones # Translators: Input help mode message for toggle report line indentation command. script_toggleReportLineIndentation.__doc__=_("Cycles through line indentation settings") script_toggleReportLineIndentation.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportParagraphIndentation(self,gesture): if config.conf["documentFormatting"]["reportParagraphIndentation"]: # Translators: The message announced when toggling the report paragraph indentation document formatting setting. state = _("report paragraph indentation off") config.conf["documentFormatting"]["reportParagraphIndentation"]=False else: # Translators: The message announced when toggling the report paragraph indentation document formatting setting. state = _("report paragraph indentation on") config.conf["documentFormatting"]["reportParagraphIndentation"]=True ui.message(state) # Translators: Input help mode message for toggle report paragraph indentation command. script_toggleReportParagraphIndentation.__doc__=_("Toggles on and off the reporting of paragraph indentation") script_toggleReportParagraphIndentation.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportLineSpacing(self,gesture): if config.conf["documentFormatting"]["reportLineSpacing"]: # Translators: The message announced when toggling the report line spacing document formatting setting. state = _("report line spacing off") config.conf["documentFormatting"]["reportLineSpacing"]=False else: # Translators: The message announced when toggling the report line spacing document formatting setting. state = _("report line spacing on") config.conf["documentFormatting"]["reportLineSpacing"]=True ui.message(state) # Translators: Input help mode message for toggle report line spacing command. script_toggleReportLineSpacing.__doc__=_("Toggles on and off the reporting of line spacing") script_toggleReportLineSpacing.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportTables(self,gesture): if config.conf["documentFormatting"]["reportTables"]: # Translators: The message announced when toggling the report tables document formatting setting. state = _("report tables off") config.conf["documentFormatting"]["reportTables"]=False else: # Translators: The message announced when toggling the report tables document formatting setting. state = _("report tables on") config.conf["documentFormatting"]["reportTables"]=True ui.message(state) # Translators: Input help mode message for toggle report tables command. script_toggleReportTables.__doc__=_("Toggles on and off the reporting of tables") script_toggleReportTables.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportTableHeaders(self,gesture): if config.conf["documentFormatting"]["reportTableHeaders"]: # Translators: The message announced when toggling the report table row/column headers document formatting setting. state = _("report table row and column headers off") config.conf["documentFormatting"]["reportTableHeaders"]=False else: # Translators: The message announced when toggling the report table row/column headers document formatting setting. 
state = _("report table row and column headers on") config.conf["documentFormatting"]["reportTableHeaders"]=True ui.message(state) # Translators: Input help mode message for toggle report table row/column headers command. script_toggleReportTableHeaders.__doc__=_("Toggles on and off the reporting of table row and column headers") script_toggleReportTableHeaders.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportTableCellCoords(self,gesture): if config.conf["documentFormatting"]["reportTableCellCoords"]: # Translators: The message announced when toggling the report table cell coordinates document formatting setting. state = _("report table cell coordinates off") config.conf["documentFormatting"]["reportTableCellCoords"]=False else: # Translators: The message announced when toggling the report table cell coordinates document formatting setting. state = _("report table cell coordinates on") config.conf["documentFormatting"]["reportTableCellCoords"]=True ui.message(state) # Translators: Input help mode message for toggle report table cell coordinates command. script_toggleReportTableCellCoords.__doc__=_("Toggles on and off the reporting of table cell coordinates") script_toggleReportTableCellCoords.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportLinks(self,gesture): if config.conf["documentFormatting"]["reportLinks"]: # Translators: The message announced when toggling the report links document formatting setting. state = _("report links off") config.conf["documentFormatting"]["reportLinks"]=False else: # Translators: The message announced when toggling the report links document formatting setting. state = _("report links on") config.conf["documentFormatting"]["reportLinks"]=True ui.message(state) # Translators: Input help mode message for toggle report links command. script_toggleReportLinks.__doc__=_("Toggles on and off the reporting of links") script_toggleReportLinks.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportComments(self,gesture): if config.conf["documentFormatting"]["reportComments"]: # Translators: The message announced when toggling the report comments document formatting setting. state = _("report comments off") config.conf["documentFormatting"]["reportComments"]=False else: # Translators: The message announced when toggling the report comments document formatting setting. state = _("report comments on") config.conf["documentFormatting"]["reportComments"]=True ui.message(state) # Translators: Input help mode message for toggle report comments command. script_toggleReportComments.__doc__=_("Toggles on and off the reporting of comments") script_toggleReportComments.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportLists(self,gesture): if config.conf["documentFormatting"]["reportLists"]: # Translators: The message announced when toggling the report lists document formatting setting. state = _("report lists off") config.conf["documentFormatting"]["reportLists"]=False else: # Translators: The message announced when toggling the report lists document formatting setting. state = _("report lists on") config.conf["documentFormatting"]["reportLists"]=True ui.message(state) # Translators: Input help mode message for toggle report lists command. 
script_toggleReportLists.__doc__=_("Toggles on and off the reporting of lists") script_toggleReportLists.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportHeadings(self,gesture): if config.conf["documentFormatting"]["reportHeadings"]: # Translators: The message announced when toggling the report headings document formatting setting. state = _("report headings off") config.conf["documentFormatting"]["reportHeadings"]=False else: # Translators: The message announced when toggling the report headings document formatting setting. state = _("report headings on") config.conf["documentFormatting"]["reportHeadings"]=True ui.message(state) # Translators: Input help mode message for toggle report headings command. script_toggleReportHeadings.__doc__=_("Toggles on and off the reporting of headings") script_toggleReportHeadings.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportBlockQuotes(self,gesture): if config.conf["documentFormatting"]["reportBlockQuotes"]: # Translators: The message announced when toggling the report block quotes document formatting setting. state = _("report block quotes off") config.conf["documentFormatting"]["reportBlockQuotes"]=False else: # Translators: The message announced when toggling the report block quotes document formatting setting. state = _("report block quotes on") config.conf["documentFormatting"]["reportBlockQuotes"]=True ui.message(state) # Translators: Input help mode message for toggle report block quotes command. script_toggleReportBlockQuotes.__doc__=_("Toggles on and off the reporting of block quotes") script_toggleReportBlockQuotes.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportLandmarks(self,gesture): if config.conf["documentFormatting"]["reportLandmarks"]: # Translators: The message announced when toggling the report landmarks document formatting setting. state = _("report landmarks off") config.conf["documentFormatting"]["reportLandmarks"]=False else: # Translators: The message announced when toggling the report landmarks document formatting setting. state = _("report landmarks on") config.conf["documentFormatting"]["reportLandmarks"]=True ui.message(state) # Translators: Input help mode message for toggle report landmarks command. script_toggleReportLandmarks.__doc__=_("Toggles on and off the reporting of landmarks") script_toggleReportLandmarks.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportFrames(self,gesture): if config.conf["documentFormatting"]["reportFrames"]: # Translators: The message announced when toggling the report frames document formatting setting. state = _("report frames off") config.conf["documentFormatting"]["reportFrames"]=False else: # Translators: The message announced when toggling the report frames document formatting setting. state = _("report frames on") config.conf["documentFormatting"]["reportFrames"]=True ui.message(state) # Translators: Input help mode message for toggle report frames command. script_toggleReportFrames.__doc__=_("Toggles on and off the reporting of frames") script_toggleReportFrames.category=SCRCAT_DOCUMENTFORMATTING def script_toggleReportClickable(self,gesture): if config.conf["documentFormatting"]["reportClickable"]: # Translators: The message announced when toggling the report if clickable document formatting setting. state = _("report if clickable off") config.conf["documentFormatting"]["reportClickable"]=False else: # Translators: The message announced when toggling the report if clickable document formatting setting. 
state = _("report if clickable on") config.conf["documentFormatting"]["reportClickable"]=True ui.message(state) # Translators: Input help mode message for toggle report if clickable command. script_toggleReportClickable.__doc__=_("Toggles on and off reporting if clickable") script_toggleReportClickable.category=SCRCAT_DOCUMENTFORMATTING def script_cycleSpeechSymbolLevel(self,gesture): curLevel = config.conf["speech"]["symbolLevel"] for level in characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS: if level > curLevel: break else: level = characterProcessing.SYMLVL_NONE name = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS[level] config.conf["speech"]["symbolLevel"] = level # Translators: Reported when the user cycles through speech symbol levels # which determine what symbols are spoken. # %s will be replaced with the symbol level; e.g. none, some, most and all. ui.message(_("Symbol level %s") % name) # Translators: Input help mode message for cycle speech symbol level command. script_cycleSpeechSymbolLevel.__doc__=_("Cycles through speech symbol levels which determine what symbols are spoken") script_cycleSpeechSymbolLevel.category=SCRCAT_SPEECH def script_moveMouseToNavigatorObject(self,gesture): try: p=api.getReviewPosition().pointAtStart except (NotImplementedError, LookupError): p=None if p: x=p.x y=p.y else: try: (left,top,width,height)=api.getNavigatorObject().location except: # Translators: Reported when the object has no location for the mouse to move to it. ui.message(_("Object has no location")) return x=left+(width//2) y=top+(height//2) winUser.setCursorPos(x,y) mouseHandler.executeMouseMoveEvent(x,y) # Translators: Input help mode message for move mouse to navigator object command. script_moveMouseToNavigatorObject.__doc__=_("Moves the mouse pointer to the current navigator object") script_moveMouseToNavigatorObject.category=SCRCAT_MOUSE def script_moveNavigatorObjectToMouse(self,gesture): # Translators: Reported when attempting to move the navigator object to the object under mouse pointer. ui.message(_("Move navigator object to mouse")) obj=api.getMouseObject() api.setNavigatorObject(obj) speech.speakObject(obj) # Translators: Input help mode message for move navigator object to mouse command. script_moveNavigatorObjectToMouse.__doc__=_("Sets the navigator object to the current object under the mouse pointer and speaks it") script_moveNavigatorObjectToMouse.category=SCRCAT_MOUSE def script_reviewMode_next(self,gesture): label=review.nextMode() if label: ui.reviewMessage(label) pos=api.getReviewPosition().copy() pos.expand(textInfos.UNIT_LINE) braille.handler.setTether(braille.handler.TETHER_REVIEW, auto=True) speech.speakTextInfo(pos) else: # Translators: reported when there are no other available review modes for this object ui.reviewMessage(_("No next review mode")) # Translators: Script help message for next review mode command. script_reviewMode_next.__doc__=_("Switches to the next review mode (e.g. 
object, document or screen) and positions the review position at the point of the navigator object") script_reviewMode_next.category=SCRCAT_TEXTREVIEW def script_reviewMode_previous(self,gesture): label=review.nextMode(prev=True) if label: ui.reviewMessage(label) pos=api.getReviewPosition().copy() pos.expand(textInfos.UNIT_LINE) braille.handler.setTether(braille.handler.TETHER_REVIEW, auto=True) speech.speakTextInfo(pos) else: # Translators: reported when there are no other available review modes for this object ui.reviewMessage(_("No previous review mode")) # Translators: Script help message for previous review mode command. script_reviewMode_previous.__doc__=_("Switches to the previous review mode (e.g. object, document or screen) and positions the review position at the point of the navigator object") script_reviewMode_previous.category=SCRCAT_TEXTREVIEW def script_toggleSimpleReviewMode(self,gesture): if config.conf["reviewCursor"]["simpleReviewMode"]: # Translators: The message announced when toggling simple review mode. state = _("Simple review mode off") config.conf["reviewCursor"]["simpleReviewMode"]=False else: # Translators: The message announced when toggling simple review mode. state = _("Simple review mode on") config.conf["reviewCursor"]["simpleReviewMode"]=True ui.message(state) # Translators: Input help mode message for toggle simple review mode command. script_toggleSimpleReviewMode.__doc__=_("Toggles simple review mode on and off") script_toggleSimpleReviewMode.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_current(self,gesture): curObject=api.getNavigatorObject() if not isinstance(curObject,NVDAObject): # Translators: Reported when the user tries to perform a command related to the navigator object # but there is no current navigator object. ui.reviewMessage(_("No navigator object")) return if scriptHandler.getLastScriptRepeatCount()>=1: if curObject.TextInfo!=NVDAObjectTextInfo: textList=[] name = curObject.name if isinstance(name, str) and not name.isspace(): textList.append(name) try: info=curObject.makeTextInfo(textInfos.POSITION_SELECTION) if not info.isCollapsed: textList.append(info.text) else: info.expand(textInfos.UNIT_LINE) if not info.isCollapsed: textList.append(info.text) except (RuntimeError, NotImplementedError): # No caret or selection on this object. pass else: textList=[] for prop in (curObject.name, curObject.value): if isinstance(prop,str) and not prop.isspace(): textList.append(prop) text=" ".join(textList) if len(text)>0 and not text.isspace(): if scriptHandler.getLastScriptRepeatCount()==1: speech.speakSpelling(text) else: if api.copyToClip(text): # Translators: Indicates something has been copied to clipboard (example output: title text copied to clipboard). speech.speakMessage(_("%s copied to clipboard")%text) else: speech.speakObject(curObject,reason=controlTypes.REASON_QUERY) # Translators: Input help mode message for report current navigator object command. script_navigatorObject_current.__doc__=_("Reports the current navigator object. 
Pressing twice spells this information, and pressing three times Copies name and value of this object to the clipboard") script_navigatorObject_current.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_currentDimensions(self,gesture): count=scriptHandler.getLastScriptRepeatCount() locationText=api.getReviewPosition().locationText if count==0 else None if not locationText: locationText=api.getNavigatorObject().locationText if not locationText: # Translators: message when there is no location information for the review cursor ui.message(_("No location information")) return ui.message(locationText) # Translators: Description for report review cursor location command. script_navigatorObject_currentDimensions.__doc__=_("Reports information about the location of the text or object at the review cursor. Pressing twice may provide further detail.") script_navigatorObject_currentDimensions.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_toFocus(self,gesture): obj=api.getFocusObject() try: pos=obj.makeTextInfo(textInfos.POSITION_CARET) except (NotImplementedError,RuntimeError): pos=obj.makeTextInfo(textInfos.POSITION_FIRST) api.setReviewPosition(pos) # Translators: Reported when attempting to move the navigator object to focus. speech.speakMessage(_("Move to focus")) speech.speakObject(obj,reason=controlTypes.REASON_FOCUS) # Translators: Input help mode message for move navigator object to current focus command. script_navigatorObject_toFocus.__doc__=_("Sets the navigator object to the current focus, and the review cursor to the position of the caret inside it, if possible.") script_navigatorObject_toFocus.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_moveFocus(self,gesture): obj=api.getNavigatorObject() if not isinstance(obj,NVDAObject): # Translators: Reported when: # 1. There is no focusable object e.g. cannot use tab and shift tab to move to controls. # 2. Trying to move focus to navigator object but there is no focus. ui.message(_("No focus")) if scriptHandler.getLastScriptRepeatCount()==0: # Translators: Reported when attempting to move focus to navigator object. ui.message(_("Move focus")) obj.setFocus() else: review=api.getReviewPosition() try: review.updateCaret() except NotImplementedError: # Translators: Reported when trying to move caret to the position of the review cursor but there is no caret. ui.message(_("No caret")) return info=review.copy() info.expand(textInfos.UNIT_LINE) speech.speakTextInfo(info,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move focus to current navigator object command. script_navigatorObject_moveFocus.__doc__=_("Pressed once sets the keyboard focus to the navigator object, pressed twice sets the system caret to the position of the review cursor") script_navigatorObject_moveFocus.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_parent(self,gesture): curObject=api.getNavigatorObject() if not isinstance(curObject,NVDAObject): # Translators: Reported when the user tries to perform a command related to the navigator object # but there is no current navigator object. ui.reviewMessage(_("No navigator object")) return simpleReviewMode=config.conf["reviewCursor"]["simpleReviewMode"] curObject=curObject.simpleParent if simpleReviewMode else curObject.parent if curObject is not None: api.setNavigatorObject(curObject) speech.speakObject(curObject,reason=controlTypes.REASON_FOCUS) else: # Translators: Reported when there is no containing (parent) object such as when focused on desktop. 
ui.reviewMessage(_("No containing object")) # Translators: Input help mode message for move to parent object command. script_navigatorObject_parent.__doc__=_("Moves the navigator object to the object containing it") script_navigatorObject_parent.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_next(self,gesture): curObject=api.getNavigatorObject() if not isinstance(curObject,NVDAObject): # Translators: Reported when the user tries to perform a command related to the navigator object # but there is no current navigator object. ui.reviewMessage(_("No navigator object")) return simpleReviewMode=config.conf["reviewCursor"]["simpleReviewMode"] curObject=curObject.simpleNext if simpleReviewMode else curObject.next if curObject is not None: api.setNavigatorObject(curObject) speech.speakObject(curObject,reason=controlTypes.REASON_FOCUS) else: # Translators: Reported when there is no next object (current object is the last object). ui.reviewMessage(_("No next")) # Translators: Input help mode message for move to next object command. script_navigatorObject_next.__doc__=_("Moves the navigator object to the next object") script_navigatorObject_next.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_previous(self,gesture): curObject=api.getNavigatorObject() if not isinstance(curObject,NVDAObject): # Translators: Reported when the user tries to perform a command related to the navigator object # but there is no current navigator object. ui.reviewMessage(_("No navigator object")) return simpleReviewMode=config.conf["reviewCursor"]["simpleReviewMode"] curObject=curObject.simplePrevious if simpleReviewMode else curObject.previous if curObject is not None: api.setNavigatorObject(curObject) speech.speakObject(curObject,reason=controlTypes.REASON_FOCUS) else: # Translators: Reported when there is no previous object (current object is the first object). ui.reviewMessage(_("No previous")) # Translators: Input help mode message for move to previous object command. script_navigatorObject_previous.__doc__=_("Moves the navigator object to the previous object") script_navigatorObject_previous.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_firstChild(self,gesture): curObject=api.getNavigatorObject() if not isinstance(curObject,NVDAObject): # Translators: Reported when the user tries to perform a command related to the navigator object # but there is no current navigator object. ui.reviewMessage(_("No navigator object")) return simpleReviewMode=config.conf["reviewCursor"]["simpleReviewMode"] curObject=curObject.simpleFirstChild if simpleReviewMode else curObject.firstChild if curObject is not None: api.setNavigatorObject(curObject) speech.speakObject(curObject,reason=controlTypes.REASON_FOCUS) else: # Translators: Reported when there is no contained (first child) object such as inside a document. ui.reviewMessage(_("No objects inside")) # Translators: Input help mode message for move to first child object command. script_navigatorObject_firstChild.__doc__=_("Moves the navigator object to the first object inside it") script_navigatorObject_firstChild.category=SCRCAT_OBJECTNAVIGATION def script_review_activate(self,gesture): # Translators: a message reported when the action at the position of the review cursor or navigator object is performed. 
actionName=_("Activate") pos=api.getReviewPosition() try: pos.activate() if isinstance(gesture,touchHandler.TouchInputGesture): touchHandler.handler.notifyInteraction(pos.NVDAObjectAtStart) ui.message(actionName) return except NotImplementedError: pass obj=api.getNavigatorObject() while obj: realActionName=actionName try: realActionName=obj.getActionName() except: pass try: obj.doAction() if isinstance(gesture,touchHandler.TouchInputGesture): touchHandler.handler.notifyInteraction(obj) ui.message(realActionName) return except NotImplementedError: pass obj=obj.parent # Translators: the message reported when there is no action to perform on the review position or navigator object. ui.message(_("No action")) # Translators: Input help mode message for activate current object command. script_review_activate.__doc__=_("Performs the default action on the current navigator object (example: presses it if it is a button).") script_review_activate.category=SCRCAT_OBJECTNAVIGATION def script_review_top(self,gesture): info=api.getReviewPosition().obj.makeTextInfo(textInfos.POSITION_FIRST) api.setReviewPosition(info) info.expand(textInfos.UNIT_LINE) ui.reviewMessage(_("Top")) speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to top line command. script_review_top.__doc__=_("Moves the review cursor to the top line of the current navigator object and speaks it") script_review_top.category=SCRCAT_TEXTREVIEW def script_review_previousLine(self,gesture): info=api.getReviewPosition().copy() if info._expandCollapseBeforeReview: info.expand(textInfos.UNIT_LINE) info.collapse() res=info.move(textInfos.UNIT_LINE,-1) if res==0: # Translators: a message reported when review cursor is at the top line of the current navigator object. ui.reviewMessage(_("Top")) else: api.setReviewPosition(info) info.expand(textInfos.UNIT_LINE) speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to previous line command. script_review_previousLine.__doc__=_("Moves the review cursor to the previous line of the current navigator object and speaks it") script_review_previousLine.resumeSayAllMode=sayAllHandler.CURSOR_REVIEW script_review_previousLine.category=SCRCAT_TEXTREVIEW def script_review_currentLine(self,gesture): info=api.getReviewPosition().copy() info.expand(textInfos.UNIT_LINE) # Explicitly tether here braille.handler.setTether(braille.handler.TETHER_REVIEW, auto=True) scriptCount=scriptHandler.getLastScriptRepeatCount() if scriptCount==0: speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET) else: speech.spellTextInfo(info,useCharacterDescriptions=scriptCount>1) # Translators: Input help mode message for read current line under review cursor command. script_review_currentLine.__doc__=_("Reports the line of the current navigator object where the review cursor is situated. If this key is pressed twice, the current line will be spelled. Pressing three times will spell the line using character descriptions.") script_review_currentLine.category=SCRCAT_TEXTREVIEW def script_review_nextLine(self,gesture): info=api.getReviewPosition().copy() if info._expandCollapseBeforeReview: info.expand(textInfos.UNIT_LINE) info.collapse() res=info.move(textInfos.UNIT_LINE,1) if res==0: # Translators: a message reported when review cursor is at the bottom line of the current navigator object. 
ui.reviewMessage(_("Bottom")) else: api.setReviewPosition(info) info.expand(textInfos.UNIT_LINE) speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to next line command. script_review_nextLine.__doc__=_("Moves the review cursor to the next line of the current navigator object and speaks it") script_review_nextLine.resumeSayAllMode=sayAllHandler.CURSOR_REVIEW script_review_nextLine.category=SCRCAT_TEXTREVIEW def script_review_bottom(self,gesture): info=api.getReviewPosition().obj.makeTextInfo(textInfos.POSITION_LAST) api.setReviewPosition(info) info.expand(textInfos.UNIT_LINE) ui.reviewMessage(_("Bottom")) speech.speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to bottom line command. script_review_bottom.__doc__=_("Moves the review cursor to the bottom line of the current navigator object and speaks it") script_review_bottom.category=SCRCAT_TEXTREVIEW def script_review_previousWord(self,gesture): info=api.getReviewPosition().copy() if info._expandCollapseBeforeReview: info.expand(textInfos.UNIT_WORD) info.collapse() res=info.move(textInfos.UNIT_WORD,-1) if res==0: # Translators: a message reported when review cursor is at the top line of the current navigator object. ui.reviewMessage(_("Top")) else: api.setReviewPosition(info) info.expand(textInfos.UNIT_WORD) speech.speakTextInfo(info,reason=controlTypes.REASON_CARET,unit=textInfos.UNIT_WORD) # Translators: Input help mode message for move review cursor to previous word command. script_review_previousWord.__doc__=_("Moves the review cursor to the previous word of the current navigator object and speaks it") script_review_previousWord.category=SCRCAT_TEXTREVIEW def script_review_currentWord(self,gesture): info=api.getReviewPosition().copy() info.expand(textInfos.UNIT_WORD) # Explicitly tether here braille.handler.setTether(braille.handler.TETHER_REVIEW, auto=True) scriptCount=scriptHandler.getLastScriptRepeatCount() if scriptCount==0: speech.speakTextInfo(info,reason=controlTypes.REASON_CARET,unit=textInfos.UNIT_WORD) else: speech.spellTextInfo(info,useCharacterDescriptions=scriptCount>1) # Translators: Input help mode message for report current word under review cursor command. script_review_currentWord.__doc__=_("Speaks the word of the current navigator object where the review cursor is situated. Pressing twice spells the word. Pressing three times spells the word using character descriptions") script_review_currentWord.category=SCRCAT_TEXTREVIEW def script_review_nextWord(self,gesture): info=api.getReviewPosition().copy() if info._expandCollapseBeforeReview: info.expand(textInfos.UNIT_WORD) info.collapse() res=info.move(textInfos.UNIT_WORD,1) if res==0: # Translators: a message reported when review cursor is at the bottom line of the current navigator object. ui.reviewMessage(_("Bottom")) else: api.setReviewPosition(info) info.expand(textInfos.UNIT_WORD) speech.speakTextInfo(info,reason=controlTypes.REASON_CARET,unit=textInfos.UNIT_WORD) # Translators: Input help mode message for move review cursor to next word command. 
script_review_nextWord.__doc__=_("Moves the review cursor to the next word of the current navigator object and speaks it") script_review_nextWord.category=SCRCAT_TEXTREVIEW def script_review_startOfLine(self,gesture): info=api.getReviewPosition().copy() info.expand(textInfos.UNIT_LINE) info.collapse() api.setReviewPosition(info) info.expand(textInfos.UNIT_CHARACTER) ui.reviewMessage(_("Left")) speech.speakTextInfo(info,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to start of current line command. script_review_startOfLine.__doc__=_("Moves the review cursor to the first character of the line where it is situated in the current navigator object and speaks it") script_review_startOfLine.category=SCRCAT_TEXTREVIEW def script_review_previousCharacter(self,gesture): lineInfo=api.getReviewPosition().copy() lineInfo.expand(textInfos.UNIT_LINE) charInfo=api.getReviewPosition().copy() if charInfo._expandCollapseBeforeReview: charInfo.expand(textInfos.UNIT_CHARACTER) charInfo.collapse() res=charInfo.move(textInfos.UNIT_CHARACTER,-1) if res==0 or charInfo.compareEndPoints(lineInfo,"startToStart")<0: # Translators: a message reported when review cursor is at the leftmost character of the current navigator object's text. ui.reviewMessage(_("Left")) reviewInfo=api.getReviewPosition().copy() reviewInfo.expand(textInfos.UNIT_CHARACTER) speech.speakTextInfo(reviewInfo,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) else: api.setReviewPosition(charInfo) charInfo.expand(textInfos.UNIT_CHARACTER) speech.speakTextInfo(charInfo,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to previous character command. script_review_previousCharacter.__doc__=_("Moves the review cursor to the previous character of the current navigator object and speaks it") script_review_previousCharacter.category=SCRCAT_TEXTREVIEW def script_review_currentCharacter(self,gesture): info=api.getReviewPosition().copy() info.expand(textInfos.UNIT_CHARACTER) # Explicitly tether here braille.handler.setTether(braille.handler.TETHER_REVIEW, auto=True) scriptCount=scriptHandler.getLastScriptRepeatCount() if scriptCount==0: speech.speakTextInfo(info,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) elif scriptCount==1: speech.spellTextInfo(info,useCharacterDescriptions=True) else: try: c = ord(info.text) except TypeError: c = None if c is not None: speech.speakMessage("%d," % c) speech.speakSpelling(hex(c)) else: log.debugWarning("Couldn't calculate ordinal for character %r" % info.text) speech.speakTextInfo(info,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for report current character under review cursor command. script_review_currentCharacter.__doc__=_("Reports the character of the current navigator object where the review cursor is situated. Pressing twice reports a description or example of that character. 
Pressing three times reports the numeric value of the character in decimal and hexadecimal") script_review_currentCharacter.category=SCRCAT_TEXTREVIEW def script_review_nextCharacter(self,gesture): lineInfo=api.getReviewPosition().copy() lineInfo.expand(textInfos.UNIT_LINE) charInfo=api.getReviewPosition().copy() if charInfo._expandCollapseBeforeReview: charInfo.expand(textInfos.UNIT_CHARACTER) charInfo.collapse() res=charInfo.move(textInfos.UNIT_CHARACTER,1) if res==0 or charInfo.compareEndPoints(lineInfo,"endToEnd")>=0: # Translators: a message reported when review cursor is at the rightmost character of the current navigator object's text. ui.reviewMessage(_("Right")) reviewInfo=api.getReviewPosition().copy() reviewInfo.expand(textInfos.UNIT_CHARACTER) speech.speakTextInfo(reviewInfo,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) else: api.setReviewPosition(charInfo) charInfo.expand(textInfos.UNIT_CHARACTER) speech.speakTextInfo(charInfo,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to next character command. script_review_nextCharacter.__doc__=_("Moves the review cursor to the next character of the current navigator object and speaks it") script_review_nextCharacter.category=SCRCAT_TEXTREVIEW def script_review_endOfLine(self,gesture): info=api.getReviewPosition().copy() info.expand(textInfos.UNIT_LINE) info.collapse(end=True) info.move(textInfos.UNIT_CHARACTER,-1) api.setReviewPosition(info) info.expand(textInfos.UNIT_CHARACTER) ui.reviewMessage(_("Right")) speech.speakTextInfo(info,unit=textInfos.UNIT_CHARACTER,reason=controlTypes.REASON_CARET) # Translators: Input help mode message for move review cursor to end of current line command. script_review_endOfLine.__doc__=_("Moves the review cursor to the last character of the line where it is situated in the current navigator object and speaks it") script_review_endOfLine.category=SCRCAT_TEXTREVIEW def _getCurrentLanguageForTextInfo(self, info): curLanguage = None if config.conf['speech']['autoLanguageSwitching']: for field in info.getTextWithFields({}): if isinstance(field, textInfos.FieldCommand) and field.command == "formatChange": curLanguage = field.field.get('language') if curLanguage is None: curLanguage = speech.getCurrentLanguage() return curLanguage @script( # Translators: Input help mode message for Review Current Symbol command. description=_("Reports the symbol where the review cursor is positioned. Pressed twice, shows the symbol and the text used to speak it in browse mode"), category=SCRCAT_TEXTREVIEW, ) def script_review_currentSymbol(self,gesture): info=api.getReviewPosition().copy() info.expand(textInfos.UNIT_CHARACTER) curLanguage = self._getCurrentLanguageForTextInfo(info) text = info.text expandedSymbol = characterProcessing.processSpeechSymbol(curLanguage, text) if expandedSymbol == text: # Translators: Reported when there is no replacement for the symbol at the position of the review cursor. ui.message(_("No symbol replacement")) return repeats=scriptHandler.getLastScriptRepeatCount() if repeats == 0: ui.message(expandedSymbol) else: # Translators: Character and its replacement used from the "Review current Symbol" command. Example: "Character: ? Replacement: question" message = _("Character: {}\nReplacement: {}").format(text, expandedSymbol) languageDescription = languageHandler.getLanguageDescription(curLanguage) # Translators: title for expanded symbol dialog. 
Example: "Expanded symbol (English)" title = _("Expanded symbol ({})").format(languageDescription) ui.browseableMessage(message, title) def script_speechMode(self,gesture): curMode=speech.speechMode speech.speechMode=speech.speechMode_talk newMode=(curMode+1)%3 if newMode==speech.speechMode_off: # Translators: A speech mode which disables speech output. name=_("Speech mode off") elif newMode==speech.speechMode_beeps: # Translators: A speech mode which will cause NVDA to beep instead of speaking. name=_("Speech mode beeps") elif newMode==speech.speechMode_talk: # Translators: The normal speech mode; i.e. NVDA will talk as normal. name=_("Speech mode talk") speech.cancelSpeech() ui.message(name) speech.speechMode=newMode # Translators: Input help mode message for toggle speech mode command. script_speechMode.__doc__=_("Toggles between the speech modes of off, beep and talk. When set to off NVDA will not speak anything. If beeps then NVDA will simply beep each time it its supposed to speak something. If talk then NVDA wil just speak normally.") script_speechMode.category=SCRCAT_SPEECH def script_moveToParentTreeInterceptor(self,gesture): obj=api.getFocusObject() parent=obj.parent #Move up parents until the tree interceptor of the parent is different to the tree interceptor of the object. #Note that this could include the situation where the parent has no tree interceptor but the object did. while parent and parent.treeInterceptor==obj.treeInterceptor: parent=parent.parent #If the parent has no tree interceptor, keep moving up the parents until we find a parent that does have one. while parent and not parent.treeInterceptor: parent=parent.parent if parent: parent.treeInterceptor.rootNVDAObject.setFocus() import eventHandler import wx # We must use core.callLater rather than wx.CallLater to ensure that the callback runs within NVDA's core pump. # If it didn't, and it directly or indirectly called wx.Yield, it could start executing NVDA's core pump from within the yield, causing recursion. core.callLater(50,eventHandler.executeEvent,"gainFocus",parent.treeInterceptor.rootNVDAObject) # Translators: Input help mode message for move to next document with focus command, mostly used in web browsing to move from embedded object to the webpage document. script_moveToParentTreeInterceptor.__doc__=_("Moves the focus to the next closest document that contains the focus") script_moveToParentTreeInterceptor.category=SCRCAT_FOCUS def script_toggleVirtualBufferPassThrough(self,gesture): focus = api.getFocusObject() vbuf = focus.treeInterceptor if not vbuf: for obj in itertools.chain((api.getFocusObject(),), reversed(api.getFocusAncestors())): try: obj.treeInterceptorClass except: continue break else: return # Force the tree interceptor to be created. ti = treeInterceptorHandler.update(obj, force=True) if not ti: return if focus in ti: # Update the focus, as it will have cached that there is no tree interceptor. focus.treeInterceptor = ti # If we just happened to create a browse mode TreeInterceptor # Then ensure that browse mode is reported here. From the users point of view, browse mode was turned on. if isinstance(ti,browseMode.BrowseModeTreeInterceptor) and not ti.passThrough: browseMode.reportPassThrough(ti,False) # #8716: Only let braille handle the focus when the tree interceptor is ready. # If not ready (e.g. a loading virtual buffer), # the buffer will take responsibility to update braille as soon as it completed loading. 
if ti.isReady: braille.handler.handleGainFocus(ti) return if not isinstance(vbuf, browseMode.BrowseModeTreeInterceptor): return # Toggle browse mode pass-through. vbuf.passThrough = not vbuf.passThrough if isinstance(vbuf,browseMode.BrowseModeTreeInterceptor): # If we are enabling pass-through, the user has explicitly chosen to do so, so disable auto-pass-through. # If we're disabling pass-through, re-enable auto-pass-through. vbuf.disableAutoPassThrough = vbuf.passThrough browseMode.reportPassThrough(vbuf) # Translators: Input help mode message for toggle focus and browse mode command in web browsing and other situations. script_toggleVirtualBufferPassThrough.__doc__=_("Toggles between browse mode and focus mode. When in focus mode, keys will pass straight through to the application, allowing you to interact directly with a control. When in browse mode, you can navigate the document with the cursor, quick navigation keys, etc.") script_toggleVirtualBufferPassThrough.category=inputCore.SCRCAT_BROWSEMODE def script_quit(self,gesture): gui.quit() # Translators: Input help mode message for quit NVDA command. script_quit.__doc__=_("Quits NVDA!") def script_restart(self,gesture): core.restart() # Translators: Input help mode message for restart NVDA command. script_restart.__doc__=_("Restarts NVDA!") def script_showGui(self,gesture): gui.showGui() # Translators: Input help mode message for show NVDA menu command. script_showGui.__doc__=_("Shows the NVDA menu") def script_review_sayAll(self,gesture): sayAllHandler.readText(sayAllHandler.CURSOR_REVIEW) # Translators: Input help mode message for say all in review cursor command. script_review_sayAll.__doc__ = _("Reads from the review cursor up to end of current text, moving the review cursor as it goes") script_review_sayAll.category=SCRCAT_TEXTREVIEW def script_sayAll(self,gesture): sayAllHandler.readText(sayAllHandler.CURSOR_CARET) # Translators: Input help mode message for say all with system caret command. script_sayAll.__doc__ = _("Reads from the system caret up to the end of the text, moving the caret as it goes") script_sayAll.category=SCRCAT_SYSTEMCARET def _reportFormattingHelper(self, info, browseable=False): formatConfig={ "detectFormatAfterCursor":False, "reportFontName":True,"reportFontSize":True,"reportFontAttributes":True,"reportColor":True,"reportRevisions":False,"reportEmphasis":False, "reportStyle":True,"reportAlignment":True,"reportSpellingErrors":True, "reportPage":False,"reportLineNumber":False,"reportLineIndentation":True,"reportLineIndentationWithTones":False,"reportParagraphIndentation":True,"reportLineSpacing":True,"reportTables":False, "reportLinks":False,"reportHeadings":False,"reportLists":False, "reportBlockQuotes":False,"reportComments":False, "reportBorderStyle":True,"reportBorderColor":True, } textList=[] # First, fetch indentation. 
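# speech.splitTextIndentation separates the leading whitespace of a line from its content:
# for example, "    foo" yields ("    ", "foo"), so the indentation can be reported before any font or style details.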
line=info.copy() line.expand(textInfos.UNIT_LINE) indentation,content=speech.splitTextIndentation(line.text) if indentation: textList.append(speech.getIndentationSpeech(indentation, formatConfig)) info=info.copy() info.expand(textInfos.UNIT_CHARACTER) formatField=textInfos.FormatField() for field in info.getTextWithFields(formatConfig): if isinstance(field,textInfos.FieldCommand) and isinstance(field.field,textInfos.FormatField): formatField.update(field.field) if not browseable: text=info.getFormatFieldSpeech(formatField,formatConfig=formatConfig) if formatField else None if text: textList.append(text) if not textList: # Translators: Reported when trying to obtain formatting information (such as font name, indentation and so on) but there is no formatting information for the text under cursor. ui.message(_("No formatting information")) return ui.message(" ".join(textList)) else: text=info.getFormatFieldSpeech(formatField,formatConfig=formatConfig, separator="\n") if formatField else None if text: textList.append(text) if not textList: # Translators: Reported when trying to obtain formatting information (such as font name, indentation and so on) but there is no formatting information for the text under cursor. ui.message(_("No formatting information")) return # Translators: title for formatting information dialog. ui.browseableMessage("\n".join(textList), _("Formatting")) def script_reportFormatting(self,gesture): info=api.getReviewPosition() repeats=scriptHandler.getLastScriptRepeatCount() if repeats==0: self._reportFormattingHelper(info,False) elif repeats==1: self._reportFormattingHelper(info,True) # Translators: Input help mode message for report formatting command. script_reportFormatting.__doc__ = _("Reports formatting info for the current review cursor position within a document. If pressed twice, presents the information in browse mode") script_reportFormatting.category=SCRCAT_TEXTREVIEW def script_reportCurrentFocus(self,gesture): focusObject=api.getFocusObject() if isinstance(focusObject,NVDAObject): if scriptHandler.getLastScriptRepeatCount()==0: speech.speakObject(focusObject, reason=controlTypes.REASON_QUERY) else: speech.speakSpelling(focusObject.name) else: ui.message(_("No focus")) # Translators: Input help mode message for report current focus command. script_reportCurrentFocus.__doc__ = _("Reports the object with focus. If pressed twice, spells the information") script_reportCurrentFocus.category=SCRCAT_FOCUS def script_reportStatusLine(self,gesture): obj = api.getStatusBar() found=False if obj: text = api.getStatusBarText(obj) api.setNavigatorObject(obj) found=True else: info=api.getForegroundObject().flatReviewPosition if info: info.expand(textInfos.UNIT_STORY) info.collapse(True) info.expand(textInfos.UNIT_LINE) text=info.text info.collapse() api.setReviewPosition(info) found=True if not found: # Translators: Reported when there is no status line for the current program or window. ui.message(_("No status line found")) return if scriptHandler.getLastScriptRepeatCount()==0: if not text.strip(): # Translators: Reported when the status line exists, but is empty. ui.message(_("no status bar information")) else: ui.message(text) elif scriptHandler.getLastScriptRepeatCount()==1: if not text.strip(): # Translators: Reported when the status line exists, but is empty. ui.message(_("no status bar information")) else: speech.speakSpelling(text) else: if not text.strip(): # Translators: Reported when the user attempts to copy the content of an empty status line.
ui.message(_("unable to copy status bar content to clipboard")) else: if api.copyToClip(text): # Translators: The message presented when the status bar is copied to the clipboard. ui.message(_("%s copied to clipboard")%text) # Translators: Input help mode message for report status line text command. script_reportStatusLine.__doc__ = _("Reads the current application status bar and moves the navigator to it. If pressed twice, spells the information. If pressed three times, copies the status bar to the clipboard") script_reportStatusLine.category=SCRCAT_FOCUS def script_toggleMouseTracking(self,gesture): if config.conf["mouse"]["enableMouseTracking"]: # Translators: presented when the mouse tracking is toggled. state = _("Mouse tracking off") config.conf["mouse"]["enableMouseTracking"]=False else: # Translators: presented when the mouse tracking is toggled. state = _("Mouse tracking on") config.conf["mouse"]["enableMouseTracking"]=True ui.message(state) # Translators: Input help mode message for toggle mouse tracking command. script_toggleMouseTracking.__doc__=_("Toggles the reporting of information as the mouse moves") script_toggleMouseTracking.category=SCRCAT_MOUSE def script_toggleMouseTextResolution(self,gesture): values = textInfos.MOUSE_TEXT_RESOLUTION_UNITS labels = [textInfos.unitLabels[x] for x in values] try: index = values.index(config.conf["mouse"]["mouseTextUnit"]) except ValueError: log.debugWarning("Couldn't get current mouse text resolution setting", exc_info=True) default = config.conf.getConfigValidation(("mouse", "mouseTextUnit")).default index = values.index(default) newIndex = (index+1) % len(values) config.conf["mouse"]["mouseTextUnit"]= values[newIndex] # Translators: Reports the new state of the mouse text unit resolution:. # %s will be replaced with the new label. # For example, the full message might be "Mouse text unit resolution character" ui.message(_("Mouse text unit resolution %s")%labels[newIndex]) # Translators: Input help mode message for toggle mouse text unit resolution command. script_toggleMouseTextResolution.__doc__=_("Toggles how much text will be spoken when the mouse moves") script_toggleMouseTextResolution.category=SCRCAT_MOUSE def script_title(self,gesture): obj=api.getForegroundObject() title=obj.name if not isinstance(title,str) or not title or title.isspace(): title=obj.appModule.appName if obj.appModule else None if not isinstance(title,str) or not title or title.isspace(): # Translators: Reported when there is no title text for current program or window. title=_("No title") repeatCount=scriptHandler.getLastScriptRepeatCount() if repeatCount==0: ui.message(title) elif repeatCount==1: speech.speakSpelling(title) else: if api.copyToClip(title): ui.message(_("%s copied to clipboard")%title) # Translators: Input help mode message for report title bar command. script_title.__doc__=_("Reports the title of the current application or foreground window. If pressed twice, spells the title. If pressed three times, copies the title to the clipboard") script_title.category=SCRCAT_FOCUS def script_speakForeground(self,gesture): obj=api.getForegroundObject() if obj: sayAllHandler.readObjects(obj) # Translators: Input help mode message for read foreground object command (usually the foreground window). 
script_speakForeground.__doc__ = _("Speaks the current foreground object") script_speakForeground.category=SCRCAT_FOCUS def script_test_navigatorDisplayModelText(self,gesture): obj=api.getNavigatorObject() text=obj.displayText speech.speakMessage(text) log.info(text) def script_startWxInspectionTool(self, gesture): import wx.lib.inspection wx.lib.inspection.InspectionTool().Show() script_startWxInspectionTool.__doc__ = _( # Translators: GUI development tool, to get information about the components used in the NVDA GUI "Opens the WX GUI inspection tool. Used to get more information about the state of GUI components." ) script_startWxInspectionTool.category = SCRCAT_TOOLS def script_navigatorObject_devInfo(self,gesture): obj=api.getNavigatorObject() if hasattr(obj, "devInfo"): log.info("Developer info for navigator object:\n%s" % "\n".join(obj.devInfo), activateLogViewer=True) else: log.info("No developer info for navigator object", activateLogViewer=True) # Translators: Input help mode message for developer info for current navigator object command, used by developers to examine technical info on navigator object. This command also serves as a shortcut to open NVDA log viewer. script_navigatorObject_devInfo.__doc__ = _("Logs information about the current navigator object which is useful to developers and activates the log viewer so the information can be examined.") script_navigatorObject_devInfo.category=SCRCAT_TOOLS def script_toggleProgressBarOutput(self,gesture): outputMode=config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"] if outputMode=="both": outputMode="off" # Translators: A mode where no progress bar updates are given. ui.message(_("No progress bar updates")) elif outputMode=="off": outputMode="speak" # Translators: A mode where progress bar updates will be spoken. ui.message(_("Speak progress bar updates")) elif outputMode=="speak": outputMode="beep" # Translators: A mode where beeps will indicate progress bar updates (beeps rise in pitch as progress bar updates). ui.message(_("Beep for progress bar updates")) else: outputMode="both" # Translators: A mode where both speech and beeps will indicate progress bar updates. ui.message(_("Beep and speak progress bar updates")) config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]=outputMode # Translators: Input help mode message for toggle progress bar output command. script_toggleProgressBarOutput.__doc__=_("Toggles between beeps, speech, beeps and speech, and off, for reporting progress bar updates") script_toggleProgressBarOutput.category=SCRCAT_SPEECH def script_toggleReportDynamicContentChanges(self,gesture): if config.conf["presentation"]["reportDynamicContentChanges"]: # Translators: presented when the present dynamic changes is toggled. state = _("report dynamic content changes off") config.conf["presentation"]["reportDynamicContentChanges"]=False else: # Translators: presented when the present dynamic changes is toggled. state = _("report dynamic content changes on") config.conf["presentation"]["reportDynamicContentChanges"]=True ui.message(state) # Translators: Input help mode message for toggle dynamic content changes command. script_toggleReportDynamicContentChanges.__doc__=_("Toggles on and off the reporting of dynamic content changes, such as new text in dos console windows") script_toggleReportDynamicContentChanges.category=SCRCAT_SPEECH def script_toggleCaretMovesReviewCursor(self,gesture): if config.conf["reviewCursor"]["followCaret"]: # Translators: presented when toggled. 
state = _("caret moves review cursor off") config.conf["reviewCursor"]["followCaret"]=False else: # Translators: presented when toggled. state = _("caret moves review cursor on") config.conf["reviewCursor"]["followCaret"]=True ui.message(state) # Translators: Input help mode message for toggle caret moves review cursor command. script_toggleCaretMovesReviewCursor.__doc__=_("Toggles on and off the movement of the review cursor due to the caret moving.") script_toggleCaretMovesReviewCursor.category=SCRCAT_TEXTREVIEW def script_toggleFocusMovesNavigatorObject(self,gesture): if config.conf["reviewCursor"]["followFocus"]: # Translators: presented when toggled. state = _("focus moves navigator object off") config.conf["reviewCursor"]["followFocus"]=False else: # Translators: presented when toggled. state = _("focus moves navigator object on") config.conf["reviewCursor"]["followFocus"]=True ui.message(state) # Translators: Input help mode message for toggle focus moves navigator object command. script_toggleFocusMovesNavigatorObject.__doc__=_("Toggles on and off the movement of the navigator object due to focus changes") script_toggleFocusMovesNavigatorObject.category=SCRCAT_OBJECTNAVIGATION def script_toggleAutoFocusFocusableElements(self,gesture): if config.conf["virtualBuffers"]["autoFocusFocusableElements"]: # Translators: presented when toggled. state = _("Automatically set system focus to focusable elements off") config.conf["virtualBuffers"]["autoFocusFocusableElements"]=False else: # Translators: presented when toggled. state = _("Automatically set system focus to focusable elements on") config.conf["virtualBuffers"]["autoFocusFocusableElements"]=True ui.message(state) # Translators: Input help mode message for toggle auto focus focusable elements command. script_toggleAutoFocusFocusableElements.__doc__=_("Toggles on and off automatic movement of the system focus due to browse mode commands") script_toggleAutoFocusFocusableElements.category=inputCore.SCRCAT_BROWSEMODE #added by Rui Batista<[email protected]> to implement a battery status script def script_say_battery_status(self,gesture): UNKNOWN_BATTERY_STATUS = 0xFF AC_ONLINE = 0X1 NO_SYSTEM_BATTERY = 0X80 sps = winKernel.SYSTEM_POWER_STATUS() if not winKernel.GetSystemPowerStatus(sps) or sps.BatteryFlag is UNKNOWN_BATTERY_STATUS: log.error("error accessing system power status") return if sps.BatteryFlag & NO_SYSTEM_BATTERY: # Translators: This is presented when there is no battery such as desktop computers and laptops with battery pack removed. ui.message(_("No system battery")) return # Translators: This is presented to inform the user of the current battery percentage. text = _("%d percent") % sps.BatteryLifePercent + " " # Translators: This is presented when AC power is connected such as when recharging a laptop battery. if sps.ACLineStatus & AC_ONLINE: text += _("AC power on") elif sps.BatteryLifeTime!=0xffffffff: # Translators: This is the estimated remaining runtime of the laptop battery. text += _("{hours:d} hours and {minutes:d} minutes remaining") .format(hours=sps.BatteryLifeTime // 3600, minutes=(sps.BatteryLifeTime % 3600) // 60) ui.message(text) # Translators: Input help mode message for report battery status command. 
script_say_battery_status.__doc__ = _("Reports battery status and time remaining if AC is not plugged in") script_say_battery_status.category=SCRCAT_SYSTEM def script_passNextKeyThrough(self,gesture): keyboardHandler.passNextKeyThrough() # Translators: Spoken to indicate that the next key press will be sent straight to the current program as though NVDA is not running. ui.message(_("Pass next key through")) # Translators: Input help mode message for pass next key through command. script_passNextKeyThrough.__doc__=_("The next key that is pressed will not be handled at all by NVDA; it will be passed directly through to Windows.") script_passNextKeyThrough.category=SCRCAT_INPUT def script_reportAppModuleInfo(self,gesture): focus=api.getFocusObject() message = '' mod=focus.appModule if isinstance(mod,appModuleHandler.AppModule) and type(mod)!=appModuleHandler.AppModule: # Translators: Indicates the name of the appModule for the current program (example output: explorer module is loaded). # This message will not be presented if there is no module for the current program. message = _(" %s module is loaded. ") % mod.appModuleName.split(".")[0] appName=appModuleHandler.getAppNameFromProcessID(focus.processID,True) # Translators: Indicates the name of the current program (example output: explorer.exe is currently running). # Note that it does not give a friendly name such as Windows Explorer; it presents the file name of the current application. # For example, the complete message for Windows explorer is: "explorer module is loaded. Explorer.exe is currently running." message +=_(" %s is currently running.") % appName ui.message(message) # Translators: Input help mode message for report current program name and app module name command. script_reportAppModuleInfo.__doc__ = _("Speaks the filename of the active application along with the name of the currently loaded appModule") script_reportAppModuleInfo.category=SCRCAT_TOOLS def script_activateGeneralSettingsDialog(self, gesture): wx.CallAfter(gui.mainFrame.onGeneralSettingsCommand, None) # Translators: Input help mode message for go to general settings command. script_activateGeneralSettingsDialog.__doc__ = _("Shows NVDA's general settings") script_activateGeneralSettingsDialog.category=SCRCAT_CONFIG def script_activateSynthesizerDialog(self, gesture): wx.CallAfter(gui.mainFrame.onSelectSynthesizerCommand, None) # Translators: Input help mode message for go to select synthesizer command. script_activateSynthesizerDialog.__doc__ = _("Shows the NVDA synthesizer selection dialog") script_activateSynthesizerDialog.category=SCRCAT_CONFIG def script_activateVoiceDialog(self, gesture): wx.CallAfter(gui.mainFrame.onSpeechSettingsCommand, None) # Translators: Input help mode message for go to speech settings command. script_activateVoiceDialog.__doc__ = _("Shows NVDA's speech settings") script_activateVoiceDialog.category=SCRCAT_CONFIG def script_activateBrailleDisplayDialog(self, gesture): wx.CallAfter(gui.mainFrame.onSelectBrailleDisplayCommand, None) # Translators: Input help mode message for go to select braille display command. script_activateBrailleDisplayDialog.__doc__ = _("Shows the NVDA braille display selection dialog") script_activateBrailleDisplayDialog.category=SCRCAT_CONFIG def script_activateBrailleSettingsDialog(self, gesture): wx.CallAfter(gui.mainFrame.onBrailleSettingsCommand, None) # Translators: Input help mode message for go to braille settings command.
script_activateBrailleSettingsDialog.__doc__ = _("Shows NVDA's braille settings") script_activateBrailleSettingsDialog.category=SCRCAT_CONFIG def script_activateKeyboardSettingsDialog(self, gesture): wx.CallAfter(gui.mainFrame.onKeyboardSettingsCommand, None) # Translators: Input help mode message for go to keyboard settings command. script_activateKeyboardSettingsDialog.__doc__ = _("Shows NVDA's keyboard settings") script_activateKeyboardSettingsDialog.category=SCRCAT_CONFIG def script_activateMouseSettingsDialog(self, gesture): wx.CallAfter(gui.mainFrame.onMouseSettingsCommand, None) # Translators: Input help mode message for go to mouse settings command. script_activateMouseSettingsDialog.__doc__ = _("Shows NVDA's mouse settings") script_activateMouseSettingsDialog.category=SCRCAT_CONFIG def script_activateReviewCursorDialog(self, gesture): wx.CallAfter(gui.mainFrame.onReviewCursorCommand, None) # Translators: Input help mode message for go to review cursor settings command. script_activateReviewCursorDialog.__doc__ = _("Shows NVDA's review cursor settings") script_activateReviewCursorDialog.category=SCRCAT_CONFIG def script_activateInputCompositionDialog(self, gesture): wx.CallAfter(gui.mainFrame.onInputCompositionCommand, None) # Translators: Input help mode message for go to input composition settings command. script_activateInputCompositionDialog.__doc__ = _("Shows NVDA's input composition settings") script_activateInputCompositionDialog.category=SCRCAT_CONFIG def script_activateObjectPresentationDialog(self, gesture): wx.CallAfter(gui.mainFrame.onObjectPresentationCommand, None) # Translators: Input help mode message for go to object presentation settings command. script_activateObjectPresentationDialog.__doc__ = _("Shows NVDA's object presentation settings") script_activateObjectPresentationDialog.category=SCRCAT_CONFIG def script_activateBrowseModeDialog(self, gesture): wx.CallAfter(gui.mainFrame.onBrowseModeCommand, None) # Translators: Input help mode message for go to browse mode settings command. script_activateBrowseModeDialog.__doc__ = _("Shows NVDA's browse mode settings") script_activateBrowseModeDialog.category=SCRCAT_CONFIG def script_activateDocumentFormattingDialog(self, gesture): wx.CallAfter(gui.mainFrame.onDocumentFormattingCommand, None) # Translators: Input help mode message for go to document formatting settings command. script_activateDocumentFormattingDialog.__doc__ = _("Shows NVDA's document formatting settings") script_activateDocumentFormattingDialog.category=SCRCAT_CONFIG def script_activateDefaultDictionaryDialog(self, gesture): wx.CallAfter(gui.mainFrame.onDefaultDictionaryCommand, None) # Translators: Input help mode message for opening default dictionary dialog. script_activateDefaultDictionaryDialog.__doc__ = _("Shows the NVDA default dictionary dialog") script_activateDefaultDictionaryDialog.category=SCRCAT_CONFIG def script_activateVoiceDictionaryDialog(self, gesture): wx.CallAfter(gui.mainFrame.onVoiceDictionaryCommand, None) # Translators: Input help mode message for opening voice-specific dictionary dialog. script_activateVoiceDictionaryDialog.__doc__ = _("Shows the NVDA voice-specific dictionary dialog") script_activateVoiceDictionaryDialog.category=SCRCAT_CONFIG def script_activateTemporaryDictionaryDialog(self, gesture): wx.CallAfter(gui.mainFrame.onTemporaryDictionaryCommand, None)
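# The settings scripts above all share one pattern: defer to the GUI thread with wx.CallAfter, then attach a docstring and category.
# A minimal sketch of how such a script could be generated; makeSettingsScript and its arguments are hypothetical, not part of NVDA:
#   def makeSettingsScript(handlerName, doc):
#       def script(self, gesture):
#           # Dialogs must be opened on the wx GUI thread, hence wx.CallAfter.
#           wx.CallAfter(getattr(gui.mainFrame, handlerName), None)
#       script.__doc__ = doc
#       script.category = SCRCAT_CONFIG
#       return script
# Translators: Input help mode message for opening temporary dictionary.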
script_activateTemporaryDictionaryDialog.__doc__ = _("Shows the NVDA temporary dictionary dialog") script_activateTemporaryDictionaryDialog.category=SCRCAT_CONFIG def script_activateSpeechSymbolsDialog(self, gesture): wx.CallAfter(gui.mainFrame.onSpeechSymbolsCommand, None) # Translators: Input help mode message for go to punctuation/symbol pronunciation dialog. script_activateSpeechSymbolsDialog.__doc__ = _("Shows the NVDA symbol pronunciation dialog") script_activateSpeechSymbolsDialog.category=SCRCAT_CONFIG def script_activateInputGesturesDialog(self, gesture): wx.CallAfter(gui.mainFrame.onInputGesturesCommand, None) # Translators: Input help mode message for go to input gestures dialog command. script_activateInputGesturesDialog.__doc__ = _("Shows the NVDA input gestures dialog") script_activateInputGesturesDialog.category=SCRCAT_CONFIG def script_saveConfiguration(self,gesture): wx.CallAfter(gui.mainFrame.onSaveConfigurationCommand, None) # Translators: Input help mode message for save current configuration command. script_saveConfiguration.__doc__ = _("Saves the current NVDA configuration") script_saveConfiguration.category=SCRCAT_CONFIG def script_revertConfiguration(self,gesture): scriptCount=scriptHandler.getLastScriptRepeatCount() if scriptCount==0: gui.mainFrame.onRevertToSavedConfigurationCommand(None) elif scriptCount==2: gui.mainFrame.onRevertToDefaultConfigurationCommand(None) # Translators: Input help mode message for apply last saved or default settings command. script_revertConfiguration.__doc__ = _("Pressing once reverts the current configuration to the most recently saved state. Pressing three times reverts to factory defaults.") script_revertConfiguration.category=SCRCAT_CONFIG def script_activatePythonConsole(self,gesture): if globalVars.appArgs.secure or config.isAppX: return import pythonConsole if not pythonConsole.consoleUI: pythonConsole.initialize() pythonConsole.consoleUI.console.updateNamespaceSnapshotVars() pythonConsole.activate() # Translators: Input help mode message for activate python console command. script_activatePythonConsole.__doc__ = _("Activates the NVDA Python Console, primarily useful for development") script_activatePythonConsole.category=SCRCAT_TOOLS def script_activateAddonsManager(self,gesture): wx.CallAfter(gui.mainFrame.onAddonsManagerCommand, None) # Translators: Input help mode message for activate manage add-ons command. script_activateAddonsManager.__doc__ = _("Activates the NVDA Add-ons Manager to install and uninstall add-on packages for NVDA") script_activateAddonsManager.category=SCRCAT_TOOLS def script_toggleSpeechViewer(self,gesture): if gui.speechViewer.isActive: # Translators: The message announced when disabling speech viewer. state = _("speech viewer disabled") gui.speechViewer.deactivate() gui.mainFrame.sysTrayIcon.menu_tools_toggleSpeechViewer.Check(False) else: # Translators: The message announced when enabling speech viewer. state = _("speech viewer enabled") gui.speechViewer.activate() gui.mainFrame.sysTrayIcon.menu_tools_toggleSpeechViewer.Check(True) ui.message(state) # Translators: Input help mode message for toggle speech viewer command. 
script_toggleSpeechViewer.__doc__ = _("Toggles the NVDA Speech viewer, a floating window that allows you to view all the text that NVDA is currently speaking") script_toggleSpeechViewer.category=SCRCAT_TOOLS def script_braille_toggleTether(self, gesture): values = [x[0] for x in braille.handler.tetherValues] labels = [x[1] for x in braille.handler.tetherValues] try: index = values.index( braille.handler.TETHER_AUTO if config.conf["braille"]["autoTether"] else config.conf["braille"]["tetherTo"] ) except: index=0 newIndex = (index+1) % len(values) newTetherChoice = values[newIndex] if newTetherChoice==braille.handler.TETHER_AUTO: config.conf["braille"]["autoTether"] = True config.conf["braille"]["tetherTo"] = braille.handler.TETHER_FOCUS else: config.conf["braille"]["autoTether"] = False braille.handler.setTether(newTetherChoice, auto=False) if newTetherChoice==braille.handler.TETHER_REVIEW: braille.handler.handleReviewMove(shouldAutoTether=False) else: braille.handler.handleGainFocus(api.getFocusObject(),shouldAutoTether=False) # Translators: Reports which position braille is tethered to # (braille can be tethered automatically or to either focus or review position). ui.message(_("Braille tethered %s") % labels[newIndex]) # Translators: Input help mode message for toggle braille tether to command (tethered means connected to or follows). script_braille_toggleTether.__doc__ = _("Toggle tethering of braille between the focus and the review position") script_braille_toggleTether.category=SCRCAT_BRAILLE def script_braille_toggleFocusContextPresentation(self, gesture): values = [x[0] for x in braille.focusContextPresentations] labels = [x[1] for x in braille.focusContextPresentations] try: index = values.index(config.conf["braille"]["focusContextPresentation"]) except: index=0 newIndex = (index+1) % len(values) config.conf["braille"]["focusContextPresentation"] = values[newIndex] braille.invalidateCachedFocusAncestors(0) braille.handler.handleGainFocus(api.getFocusObject()) # Translators: Reports the new state of braille focus context presentation. # %s will be replaced with the context presentation setting. # For example, the full message might be "Braille focus context presentation: fill display for context changes" ui.message(_("Braille focus context presentation: %s")%labels[newIndex].lower()) # Translators: Input help mode message for toggle braille focus context presentation command. script_braille_toggleFocusContextPresentation.__doc__ = _("Toggle the way context information is presented in braille") script_braille_toggleFocusContextPresentation.category=SCRCAT_BRAILLE def script_braille_toggleShowCursor(self, gesture): if config.conf["braille"]["showCursor"]: # Translators: The message announced when toggling the braille cursor. state = _("Braille cursor off") config.conf["braille"]["showCursor"]=False else: # Translators: The message announced when toggling the braille cursor. state = _("Braille cursor on") config.conf["braille"]["showCursor"]=True ui.message(state) # Translators: Input help mode message for toggle braille cursor command. script_braille_toggleShowCursor.__doc__ = _("Toggle the braille cursor on and off") script_braille_toggleShowCursor.category=SCRCAT_BRAILLE def script_braille_cycleCursorShape(self, gesture): if not config.conf["braille"]["showCursor"]: # Translators: A message reported when changing the braille cursor shape when the braille cursor is turned off. 
ui.message(_("Braille cursor is turned off")) return shapes = [s[0] for s in braille.CURSOR_SHAPES] if braille.handler.getTether() == braille.handler.TETHER_FOCUS: cursorShape = "cursorShapeFocus" else: cursorShape = "cursorShapeReview" try: index = shapes.index(config.conf["braille"][cursorShape]) + 1 except: index = 1 if index >= len(braille.CURSOR_SHAPES): index = 0 config.conf["braille"][cursorShape] = braille.CURSOR_SHAPES[index][0] shapeMsg = braille.CURSOR_SHAPES[index][1] # Translators: Reports which braille cursor shape is activated. ui.message(_("Braille cursor %s") % shapeMsg) # Translators: Input help mode message for cycle braille cursor shape command. script_braille_cycleCursorShape.__doc__ = _("Cycle through the braille cursor shapes") script_braille_cycleCursorShape.category=SCRCAT_BRAILLE def script_reportClipboardText(self,gesture): try: text = api.getClipData() except: text = None if not text or not isinstance(text,str) or text.isspace(): # Translators: Presented when there is no text on the clipboard. ui.message(_("There is no text on the clipboard")) return if len(text) < 1024: ui.message(text) else: # Translators: If the number of characters on the clipboard is greater than about 1000, it reports this message and gives number of characters on the clipboard. # Example output: The clipboard contains a large portion of text. It is 2300 characters long. ui.message(_("The clipboard contains a large portion of text. It is %s characters long") % len(text)) # Translators: Input help mode message for report clipboard text command. script_reportClipboardText.__doc__ = _("Reports the text on the Windows clipboard") script_reportClipboardText.category=SCRCAT_SYSTEM def script_review_markStartForCopy(self, gesture): reviewPos = api.getReviewPosition() # attach the marker to obj so that the marker is cleaned up when obj is cleaned up. reviewPos.obj._copyStartMarker = reviewPos.copy() # represents the start location reviewPos.obj._selectThenCopyRange = None # we may be part way through a select, reset the copy range. # Translators: Indicates start of review cursor text to be copied to clipboard. ui.message(_("Start marked")) # Translators: Input help mode message for mark review cursor position for a select or copy command (that is, marks the current review cursor position as the starting point for text to be selected). script_review_markStartForCopy.__doc__ = _("Marks the current position of the review cursor as the start of text to be selected or copied") script_review_markStartForCopy.category=SCRCAT_TEXTREVIEW @script( # Translators: Input help mode message for move review cursor to marked start position for a # select or copy command description=_( "Move the review cursor to the position marked as the start of text to be selected or copied" ), category=SCRCAT_TEXTREVIEW, gesture="kb:NVDA+shift+F9", ) def script_review_moveToStartMarkedForCopy(self, gesture): pos = api.getReviewPosition() if not getattr(pos.obj, "_copyStartMarker", None): # Translators: Presented when attempting to move to the start marker for copy but none has been set. 
ui.reviewMessage(_("No start marker set")) return startMarker = pos.obj._copyStartMarker.copy() api.setReviewPosition(startMarker) startMarker.collapse() startMarker.expand(textInfos.UNIT_CHARACTER) speech.speakTextInfo(startMarker, unit=textInfos.UNIT_CHARACTER, reason=controlTypes.REASON_CARET) def script_review_copy(self, gesture): pos = api.getReviewPosition().copy() if not getattr(pos.obj, "_copyStartMarker", None): # Translators: Presented when attempting to copy some review cursor text but there is no start marker. ui.message(_("No start marker set")) return startMarker = api.getReviewPosition().obj._copyStartMarker # first call, try to set the selection. if scriptHandler.getLastScriptRepeatCount()==0 : if getattr(pos.obj, "_selectThenCopyRange", None): # we have already tried selecting the text, dont try again. For now selections can not be ammended. # Translators: Presented when text has already been marked for selection, but not yet copied. ui.message(_("Press twice to copy or reset the start marker")) return copyMarker = startMarker.copy() # Check if the end position has moved if pos.compareEndPoints(startMarker, "endToEnd") > 0: # user has moved the cursor 'forward' # start becomes the original start copyMarker.setEndPoint(startMarker, "startToStart") # end needs to be updated to the current cursor position. copyMarker.setEndPoint(pos, "endToEnd") copyMarker.move(textInfos.UNIT_CHARACTER, 1, endPoint="end") else:# user has moved the cursor 'backwards' or not at all. # when the cursor is not moved at all we still want to select the character have under the cursor # start becomes the current cursor position position copyMarker.setEndPoint(pos, "startToStart") # end becomes the original start position plus 1 copyMarker.setEndPoint(startMarker, "endToEnd") copyMarker.move(textInfos.UNIT_CHARACTER, 1, endPoint="end") if copyMarker.compareEndPoints(copyMarker, "startToEnd") == 0: # Translators: Presented when there is no text selection to copy from review cursor. ui.message(_("No text to copy")) api.getReviewPosition().obj._copyStartMarker = None return api.getReviewPosition().obj._selectThenCopyRange = copyMarker # for applications such as word, where the selected text is not automatically spoken we must monitor it ourself try: # old selection info must be saved so that its possible to report on the changes to the selection. oldInfo=pos.obj.makeTextInfo(textInfos.POSITION_SELECTION) except Exception as e: log.debug("Error trying to get initial selection information %s" % e) pass try: copyMarker.updateSelection() if hasattr(pos.obj, "reportSelectionChange"): # wait for applications such as word to update their selection so that we can detect it try: pos.obj.reportSelectionChange(oldInfo) except Exception as e: log.debug("Error trying to report the updated selection: %s" % e) except NotImplementedError as e: # we are unable to select the text, leave the _copyStartMarker in place in case the user wishes to copy the text. # Translators: Presented when unable to select the marked text. ui.message(_("Can't select text, press twice to copy")) log.debug("Error trying to update selection: %s" % e) return elif scriptHandler.getLastScriptRepeatCount()==1: # the second call, try to copy the text copyMarker = pos.obj._selectThenCopyRange if copyMarker.copyToClipboard(): # Translators: Presented when some review text has been copied to clipboard. ui.message(_("Review selection copied to clipboard")) else: # Translators: Presented when unable to copy to the clipboard because of an error. 
ui.message(_("Unable to copy")) # on the second call always clean up the start marker api.getReviewPosition().obj._selectThenCopyRange = None api.getReviewPosition().obj._copyStartMarker = None return # Translators: Input help mode message for the select then copy command. The select then copy command first selects the review cursor text, then copies it to the clipboard. script_review_copy.__doc__ = _("If pressed once, the text from the previously set start marker up to and including the current position of the review cursor is selected. If pressed twice, the text is copied to the clipboard") script_review_copy.category=SCRCAT_TEXTREVIEW def script_braille_scrollBack(self, gesture): braille.handler.scrollBack() # Translators: Input help mode message for a braille command. script_braille_scrollBack.__doc__ = _("Scrolls the braille display back") script_braille_scrollBack.bypassInputHelp = True script_braille_scrollBack.category=SCRCAT_BRAILLE def script_braille_scrollForward(self, gesture): braille.handler.scrollForward() # Translators: Input help mode message for a braille command. script_braille_scrollForward.__doc__ = _("Scrolls the braille display forward") script_braille_scrollForward.bypassInputHelp = True script_braille_scrollForward.category=SCRCAT_BRAILLE def script_braille_routeTo(self, gesture): braille.handler.routeTo(gesture.routingIndex) # Translators: Input help mode message for a braille command. script_braille_routeTo.__doc__ = _("Routes the cursor to or activates the object under this braille cell") script_braille_routeTo.category=SCRCAT_BRAILLE def script_braille_reportFormatting(self, gesture): info = braille.handler.getTextInfoForWindowPos(gesture.routingIndex) if info is None: # Translators: Reported when trying to obtain formatting information (such as font name, indentation and so on) but there is no formatting information for the text under cursor. ui.message(_("No formatting information")) return self._reportFormattingHelper(info, False) # Translators: Input help mode message for Braille report formatting command. script_braille_reportFormatting.__doc__ = _("Reports formatting info for the text under this braille cell") script_braille_reportFormatting.category=SCRCAT_BRAILLE def script_braille_previousLine(self, gesture): if braille.handler.buffer.regions: braille.handler.buffer.regions[-1].previousLine(start=True) # Translators: Input help mode message for a braille command. script_braille_previousLine.__doc__ = _("Moves the braille display to the previous line") script_braille_previousLine.category=SCRCAT_BRAILLE def script_braille_nextLine(self, gesture): if braille.handler.buffer.regions: braille.handler.buffer.regions[-1].nextLine() # Translators: Input help mode message for a braille command. script_braille_nextLine.__doc__ = _("Moves the braille display to the next line") script_braille_nextLine.category=SCRCAT_BRAILLE def script_braille_dots(self, gesture): brailleInput.handler.input(gesture.dots) # Translators: Input help mode message for a braille command. 
script_braille_dots.__doc__= _("Inputs braille dots via the braille keyboard") script_braille_dots.category=SCRCAT_BRAILLE def script_braille_toFocus(self, gesture): braille.handler.setTether(braille.handler.TETHER_FOCUS, auto=True) if braille.handler.getTether() == braille.handler.TETHER_REVIEW: self.script_navigatorObject_toFocus(gesture) else: obj = api.getFocusObject() region = braille.handler.mainBuffer.regions[-1] if braille.handler.mainBuffer.regions else None if region and region.obj==obj: braille.handler.mainBuffer.focus(region) if region.brailleCursorPos is not None: braille.handler.mainBuffer.scrollTo(region, region.brailleCursorPos) elif region.brailleSelectionStart is not None: braille.handler.mainBuffer.scrollTo(region, region.brailleSelectionStart) braille.handler.mainBuffer.updateDisplay() else: braille.handler.handleGainFocus(obj,shouldAutoTether=False) # Translators: Input help mode message for a braille command. script_braille_toFocus.__doc__= _("Moves the braille display to the current focus") script_braille_toFocus.category=SCRCAT_BRAILLE def script_braille_eraseLastCell(self, gesture): brailleInput.handler.eraseLastCell() # Translators: Input help mode message for a braille command. script_braille_eraseLastCell.__doc__= _("Erases the last entered braille cell or character") script_braille_eraseLastCell.category=SCRCAT_BRAILLE def script_braille_enter(self, gesture): brailleInput.handler.enter() # Translators: Input help mode message for a braille command. script_braille_enter.__doc__= _("Translates any braille input and presses the enter key") script_braille_enter.category=SCRCAT_BRAILLE def script_braille_translate(self, gesture): brailleInput.handler.translate() # Translators: Input help mode message for a braille command. script_braille_translate.__doc__= _("Translates any braille input") script_braille_translate.category=SCRCAT_BRAILLE def script_braille_toggleShift(self, gesture): brailleInput.handler.toggleModifier("shift") # Translators: Input help mode message for a braille command. script_braille_toggleShift.__doc__= _("Virtually toggles the shift key to emulate a keyboard shortcut with braille input") script_braille_toggleShift.category=inputCore.SCRCAT_KBEMU script_braille_toggleShift.bypassInputHelp = True def script_braille_toggleControl(self, gesture): brailleInput.handler.toggleModifier("control") # Translators: Input help mode message for a braille command. script_braille_toggleControl.__doc__= _("Virtually toggles the control key to emulate a keyboard shortcut with braille input") script_braille_toggleControl.category=inputCore.SCRCAT_KBEMU script_braille_toggleControl.bypassInputHelp = True def script_braille_toggleAlt(self, gesture): brailleInput.handler.toggleModifier("alt") # Translators: Input help mode message for a braille command. script_braille_toggleAlt.__doc__= _("Virtually toggles the alt key to emulate a keyboard shortcut with braille input") script_braille_toggleAlt.category=inputCore.SCRCAT_KBEMU script_braille_toggleAlt.bypassInputHelp = True def script_braille_toggleWindows(self, gesture): brailleInput.handler.toggleModifier("leftWindows") # Translators: Input help mode message for a braille command. 
script_braille_toggleWindows.__doc__= _("Virtually toggles the left windows key to emulate a keyboard shortcut with braille input") script_braille_toggleWindows.category=inputCore.SCRCAT_KBEMU script_braille_toggleWindows.bypassInputHelp = True def script_braille_toggleNVDAKey(self, gesture): brailleInput.handler.toggleModifier("NVDA") # Translators: Input help mode message for a braille command. script_braille_toggleNVDAKey.__doc__= _("Virtually toggles the NVDA key to emulate a keyboard shortcut with braille input") script_braille_toggleNVDAKey.category=inputCore.SCRCAT_KBEMU script_braille_toggleNVDAKey.bypassInputHelp = True def script_reloadPlugins(self, gesture): import globalPluginHandler appModuleHandler.reloadAppModules() globalPluginHandler.reloadGlobalPlugins() NVDAObject.clearDynamicClassCache() # Translators: Presented when plugins (app modules and global plugins) are reloaded. ui.message(_("Plugins reloaded")) # Translators: Input help mode message for reload plugins command. script_reloadPlugins.__doc__=_("Reloads app modules and global plugins without restarting NVDA, which can be useful for developers") script_reloadPlugins.category=SCRCAT_TOOLS def script_navigatorObject_nextInFlow(self,gesture): curObject=api.getNavigatorObject() newObject=None if curObject.simpleFirstChild: newObject=curObject.simpleFirstChild elif curObject.simpleNext: newObject=curObject.simpleNext elif curObject.simpleParent: parent=curObject.simpleParent while parent and not parent.simpleNext: parent=parent.simpleParent if parent: newObject=parent.simpleNext if newObject: api.setNavigatorObject(newObject) speech.speakObject(newObject,reason=controlTypes.REASON_FOCUS) else: # Translators: a message when there is no next object when navigating ui.reviewMessage(_("No next")) # Translators: Input help mode message for a touchscreen gesture. script_navigatorObject_nextInFlow.__doc__=_("Moves to the next object in a flattened view of the object navigation hierarchy") script_navigatorObject_nextInFlow.category=SCRCAT_OBJECTNAVIGATION def script_navigatorObject_previousInFlow(self,gesture): curObject=api.getNavigatorObject() newObject=curObject.simplePrevious if newObject: while newObject.simpleLastChild: newObject=newObject.simpleLastChild else: newObject=curObject.simpleParent if newObject: api.setNavigatorObject(newObject) speech.speakObject(newObject,reason=controlTypes.REASON_FOCUS) else: # Translators: a message when there is no previous object when navigating ui.reviewMessage(_("No previous")) # Translators: Input help mode message for a touchscreen gesture. script_navigatorObject_previousInFlow.__doc__=_("Moves to the previous object in a flattened view of the object navigation hierarchy") script_navigatorObject_previousInFlow.category=SCRCAT_OBJECTNAVIGATION def script_touch_changeMode(self,gesture): mode=touchHandler.handler._curTouchMode index=touchHandler.availableTouchModes.index(mode) index=(index+1)%len(touchHandler.availableTouchModes) newMode=touchHandler.availableTouchModes[index] touchHandler.handler._curTouchMode=newMode try: newModeLabel=touchHandler.touchModeLabels[newMode] except KeyError: # Translators: Cycles through available touch modes (a group of related touch gestures; example output: "object mode"; see the user guide for more information on touch modes). newModeLabel=_("%s mode")%newMode ui.message(newModeLabel)
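# The modulo arithmetic above wraps the cycle: with three available touch modes, index 2 advances to (2+1) % 3 = 0, so moving past the last mode returns to the first.
# Translators: Input help mode message for a touchscreen gesture.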
script_touch_changeMode.__doc__=_("Cycles between available touch modes") script_touch_changeMode.category=SCRCAT_TOUCH def script_touch_newExplore(self,gesture): touchHandler.handler.screenExplorer.moveTo(gesture.x,gesture.y,new=True) # Translators: Input help mode message for a touchscreen gesture. script_touch_newExplore.__doc__=_("Reports the object and content directly under your finger") script_touch_newExplore.category=SCRCAT_TOUCH def script_touch_explore(self,gesture): touchHandler.handler.screenExplorer.moveTo(gesture.x,gesture.y) # Translators: Input help mode message for a touchscreen gesture. script_touch_explore.__doc__=_("Reports the new object or content under your finger if different to where your finger was last") script_touch_explore.category=SCRCAT_TOUCH def script_touch_hoverUp(self,gesture): #Specifically for touch typing with onscreen keyboard keys # #7309: by default, one must double tap the touch key. To restore old behavior, go to Touch Interaction dialog and change touch typing option. if config.conf["touch"]["touchTyping"]: obj=api.getNavigatorObject() import NVDAObjects.UIA if isinstance(obj,NVDAObjects.UIA.UIA) and obj.UIAElement.cachedClassName=="CRootKey": obj.doAction() script_touch_hoverUp.category=SCRCAT_TOUCH def script_activateConfigProfilesDialog(self, gesture): wx.CallAfter(gui.mainFrame.onConfigProfilesCommand, None) # Translators: Describes the command to open the Configuration Profiles dialog. script_activateConfigProfilesDialog.__doc__ = _("Shows the NVDA Configuration Profiles dialog") script_activateConfigProfilesDialog.category=SCRCAT_CONFIG_PROFILES def script_toggleConfigProfileTriggers(self,gesture): if config.conf.profileTriggersEnabled: config.conf.disableProfileTriggers() # Translators: The message announced when temporarily disabling all configuration profile triggers. state = _("Configuration profile triggers disabled") else: config.conf.enableProfileTriggers() # Explicitly trigger profiles for the current application. mod = api.getForegroundObject().appModule trigger = mod._configProfileTrigger = appModuleHandler.AppProfileTrigger(mod.appName) trigger.enter() # Translators: The message announced when re-enabling all configuration profile triggers. state = _("Configuration profile triggers enabled") ui.message(state) # Translators: Input help mode message for toggle configuration profile triggers command. script_toggleConfigProfileTriggers.__doc__=_("Toggles disabling of all configuration profile triggers. Disabling remains in effect until NVDA is restarted") script_toggleConfigProfileTriggers.category=SCRCAT_CONFIG def script_interactWithMath(self, gesture): import mathPres mathMl = mathPres.getMathMlFromTextInfo(api.getReviewPosition()) if not mathMl: obj = api.getNavigatorObject() if obj.role == controlTypes.ROLE_MATH: try: mathMl = obj.mathMl except (NotImplementedError, LookupError): mathMl = None if not mathMl: # Translators: Reported when the user attempts math interaction # with something that isn't math. ui.message(_("Not math")) return mathPres.interactWithMathMl(mathMl) # Translators: Describes a command. script_interactWithMath.__doc__ = _("Begins interaction with math content") def script_recognizeWithUwpOcr(self, gesture): if not winVersion.isUwpOcrAvailable(): # Translators: Reported when Windows 10 OCR is not available. ui.message(_("Windows 10 OCR not available")) return from contentRecog import uwpOcr, recogUi recog = uwpOcr.UwpOcr() recogUi.recognizeNavigatorObject(recog) # Translators: Describes a command. 
script_recognizeWithUwpOcr.__doc__ = _("Recognizes the content of the current navigator object with Windows 10 OCR") @script( # Translators: Input help mode message for toggle report CLDR command. description=_("Toggles on and off the reporting of CLDR characters, such as emojis"), category=SCRCAT_SPEECH, ) def script_toggleReportCLDR(self, gesture): if config.conf["speech"]["includeCLDR"]: # Translators: presented when the report CLDR is toggled. state = _("report CLDR characters off") config.conf["speech"]["includeCLDR"] = False else: # Translators: presented when the report CLDR is toggled. state = _("report CLDR characters on") config.conf["speech"]["includeCLDR"] = True characterProcessing.clearSpeechSymbols() ui.message(state) @script( # Translators: Describes a command. description=_( "Toggles the state of the screen curtain, " "either by making the screen black or showing the contents of the screen. " "If pressed to enable once, the screen curtain is enabled until you restart NVDA. " "If pressed three times, it is enabled until you disable it" ), category=SCRCAT_VISION ) def script_toggleScreenCurtain(self, gesture): message = None try: screenCurtainName = "screenCurtain" if not vision.getProviderClass(screenCurtainName).canStart(): # Translators: Reported when the screen curtain is not available. message = _("Screen curtain not available") return scriptCount = scriptHandler.getLastScriptRepeatCount() if scriptCount == 0 and screenCurtainName in vision.handler.providers: vision.handler.terminateProvider(screenCurtainName) # Translators: Reported when the screen curtain is disabled. message = _("Screen curtain disabled") elif scriptCount in (0, 2): temporary = scriptCount == 0 if not vision.handler.initializeProvider( screenCurtainName, temporary=temporary, ): # Translators: Reported when the screen curtain could not be enabled. message = _("Could not enable screen curtain") return else: if temporary: # Translators: Reported when the screen curtain is temporarily enabled. message = _("Temporary Screen curtain, enabled until next restart") else: # Translators: Reported when the screen curtain is enabled. 
message = _("Screen curtain enabled") finally: if message is not None: ui.message(message, speechPriority=speech.priorities.SPRI_NOW) __gestures = { # Basic "kb:NVDA+n": "showGui", "kb:NVDA+1": "toggleInputHelp", "kb:NVDA+q": "quit", "kb:NVDA+f2": "passNextKeyThrough", "kb(desktop):NVDA+shift+s":"toggleCurrentAppSleepMode", "kb(laptop):NVDA+shift+z":"toggleCurrentAppSleepMode", # System status "kb:NVDA+f12": "dateTime", "kb:NVDA+shift+b": "say_battery_status", "kb:NVDA+c": "reportClipboardText", # System focus "kb:NVDA+tab": "reportCurrentFocus", "kb:NVDA+t": "title", "kb:NVDA+b": "speakForeground", "kb(desktop):NVDA+end": "reportStatusLine", "kb(laptop):NVDA+shift+end": "reportStatusLine", # System caret "kb(desktop):NVDA+downArrow": "sayAll", "kb(laptop):NVDA+a": "sayAll", "kb(desktop):NVDA+upArrow": "reportCurrentLine", "kb(laptop):NVDA+l": "reportCurrentLine", "kb(desktop):NVDA+shift+upArrow": "reportCurrentSelection", "kb(laptop):NVDA+shift+s": "reportCurrentSelection", "kb:NVDA+f": "reportFormatting", # Object navigation "kb:NVDA+numpad5": "navigatorObject_current", "kb(laptop):NVDA+shift+o": "navigatorObject_current", "kb:NVDA+numpad8": "navigatorObject_parent", "kb(laptop):NVDA+shift+upArrow": "navigatorObject_parent", "ts(object):flickup":"navigatorObject_parent", "kb:NVDA+numpad4": "navigatorObject_previous", "kb(laptop):NVDA+shift+leftArrow": "navigatorObject_previous", "ts(object):flickleft":"navigatorObject_previousInFlow", "ts(object):2finger_flickleft":"navigatorObject_previous", "kb:NVDA+numpad6": "navigatorObject_next", "kb(laptop):NVDA+shift+rightArrow": "navigatorObject_next", "ts(object):flickright":"navigatorObject_nextInFlow", "ts(object):2finger_flickright":"navigatorObject_next", "kb:NVDA+numpad2": "navigatorObject_firstChild", "kb(laptop):NVDA+shift+downArrow": "navigatorObject_firstChild", "ts(object):flickdown":"navigatorObject_firstChild", "kb:NVDA+numpadMinus": "navigatorObject_toFocus", "kb(laptop):NVDA+backspace": "navigatorObject_toFocus", "kb:NVDA+numpadEnter": "review_activate", "kb(laptop):NVDA+enter": "review_activate", "ts:double_tap": "review_activate", "kb:NVDA+shift+numpadMinus": "navigatorObject_moveFocus", "kb(laptop):NVDA+shift+backspace": "navigatorObject_moveFocus", "kb:NVDA+numpadDelete": "navigatorObject_currentDimensions", "kb(laptop):NVDA+delete": "navigatorObject_currentDimensions", #Touch-specific commands "ts:tap":"touch_newExplore", "ts:hoverDown":"touch_newExplore", "ts:hover":"touch_explore", "ts:3finger_tap":"touch_changeMode", "ts:2finger_double_tap":"showGui", "ts:hoverUp":"touch_hoverUp", # Review cursor "kb:shift+numpad7": "review_top", "kb(laptop):NVDA+control+home": "review_top", "kb:numpad7": "review_previousLine", "ts(text):flickUp":"review_previousLine", "kb(laptop):NVDA+upArrow": "review_previousLine", "kb:numpad8": "review_currentLine", "kb(laptop):NVDA+shift+.": "review_currentLine", "kb:numpad9": "review_nextLine", "kb(laptop):NVDA+downArrow": "review_nextLine", "ts(text):flickDown":"review_nextLine", "kb:shift+numpad9": "review_bottom", "kb(laptop):NVDA+control+end": "review_bottom", "kb:numpad4": "review_previousWord", "kb(laptop):NVDA+control+leftArrow": "review_previousWord", "ts(text):2finger_flickLeft":"review_previousWord", "kb:numpad5": "review_currentWord", "kb(laptop):NVDA+control+.": "review_currentWord", "ts(text):hoverUp":"review_currentWord", "kb:numpad6": "review_nextWord", "kb(laptop):NVDA+control+rightArrow": "review_nextWord", "ts(text):2finger_flickRight":"review_nextWord", "kb:shift+numpad1": 
"review_startOfLine", "kb(laptop):NVDA+home": "review_startOfLine", "kb:numpad1": "review_previousCharacter", "kb(laptop):NVDA+leftArrow": "review_previousCharacter", "ts(text):flickLeft":"review_previousCharacter", "kb:numpad2": "review_currentCharacter", "kb(laptop):NVDA+.": "review_currentCharacter", "kb:numpad3": "review_nextCharacter", "kb(laptop):NVDA+rightArrow": "review_nextCharacter", "ts(text):flickRight":"review_nextCharacter", "kb:shift+numpad3": "review_endOfLine", "kb(laptop):NVDA+end": "review_endOfLine", "kb:numpadPlus": "review_sayAll", "kb(laptop):NVDA+shift+a": "review_sayAll", "ts(text):3finger_flickDown":"review_sayAll", "kb:NVDA+f9": "review_markStartForCopy", "kb:NVDA+f10": "review_copy", # Flat review "kb:NVDA+numpad7": "reviewMode_next", "kb(laptop):NVDA+pageUp": "reviewMode_next", "ts(object):2finger_flickUp": "reviewMode_next", "kb:NVDA+numpad1": "reviewMode_previous", "kb(laptop):NVDA+pageDown": "reviewMode_previous", "ts(object):2finger_flickDown": "reviewMode_previous", # Mouse "kb:numpadDivide": "leftMouseClick", "kb(laptop):NVDA+[": "leftMouseClick", "kb:shift+numpadDivide": "toggleLeftMouseButton", "kb(laptop):NVDA+control+[": "toggleLeftMouseButton", "kb:numpadMultiply": "rightMouseClick", "kb(laptop):NVDA+]": "rightMouseClick", "kb:shift+numpadMultiply": "toggleRightMouseButton", "kb(laptop):NVDA+control+]": "toggleRightMouseButton", "kb:NVDA+numpadDivide": "moveMouseToNavigatorObject", "kb(laptop):NVDA+shift+m": "moveMouseToNavigatorObject", "kb:NVDA+numpadMultiply": "moveNavigatorObjectToMouse", "kb(laptop):NVDA+shift+n": "moveNavigatorObjectToMouse", # Tree interceptors "kb:NVDA+space": "toggleVirtualBufferPassThrough", "kb:NVDA+control+space": "moveToParentTreeInterceptor", # Preferences dialogs and panels "kb:NVDA+control+g": "activateGeneralSettingsDialog", "kb:NVDA+control+s": "activateSynthesizerDialog", "kb:NVDA+control+v": "activateVoiceDialog", "kb:NVDA+control+a": "activateBrailleDisplayDialog", "kb:NVDA+control+k": "activateKeyboardSettingsDialog", "kb:NVDA+control+m": "activateMouseSettingsDialog", "kb:NVDA+control+o": "activateObjectPresentationDialog", "kb:NVDA+control+b": "activateBrowseModeDialog", "kb:NVDA+control+d": "activateDocumentFormattingDialog", # Configuration management "kb:NVDA+control+c": "saveConfiguration", "kb:NVDA+control+r": "revertConfiguration", "kb:NVDA+control+p": "activateConfigProfilesDialog", # Settings "kb:NVDA+shift+d":"cycleAudioDuckingMode", "kb:NVDA+2": "toggleSpeakTypedCharacters", "kb:NVDA+3": "toggleSpeakTypedWords", "kb:NVDA+4": "toggleSpeakCommandKeys", "kb:NVDA+p": "cycleSpeechSymbolLevel", "kb:NVDA+s": "speechMode", "kb:NVDA+m": "toggleMouseTracking", "kb:NVDA+u": "toggleProgressBarOutput", "kb:NVDA+5": "toggleReportDynamicContentChanges", "kb:NVDA+6": "toggleCaretMovesReviewCursor", "kb:NVDA+7": "toggleFocusMovesNavigatorObject", "kb:NVDA+8": "toggleAutoFocusFocusableElements", "kb:NVDA+control+t": "braille_toggleTether", # Synth settings ring "kb(desktop):NVDA+control+leftArrow": "previousSynthSetting", "kb(laptop):NVDA+shift+control+leftArrow": "previousSynthSetting", "kb(desktop):NVDA+control+rightArrow": "nextSynthSetting", "kb(laptop):NVDA+shift+control+rightArrow": "nextSynthSetting", "kb(desktop):NVDA+control+upArrow": "increaseSynthSetting", "kb(laptop):NVDA+shift+control+upArrow": "increaseSynthSetting", "kb(desktop):NVDA+control+downArrow": "decreaseSynthSetting", "kb(laptop):NVDA+control+shift+downArrow": "decreaseSynthSetting", # Braille keyboard "bk:dots" : "braille_dots", "bk:dot7" : 
"braille_eraseLastCell", "bk:dot8" : "braille_enter", "bk:dot7+dot8" : "braille_translate", # Tools "kb:NVDA+f1": "navigatorObject_devInfo", "kb:NVDA+control+f1": "reportAppModuleInfo", "kb:NVDA+control+z": "activatePythonConsole", "kb:NVDA+control+f3": "reloadPlugins", "kb(desktop):NVDA+control+f2": "test_navigatorDisplayModelText", "kb:NVDA+alt+m": "interactWithMath", "kb:NVDA+r": "recognizeWithUwpOcr", } #: The single global commands instance. #: @type: L{GlobalCommands} commands = GlobalCommands() class ConfigProfileActivationCommands(ScriptableObject): """Singleton scriptable object that collects scripts for available configuration profiles.""" scriptCategory = SCRCAT_CONFIG_PROFILES @classmethod def __new__(cls, *args, **kwargs): # Iterate through the available profiles, creating scripts for them. for profile in config.conf.listProfiles(): cls.addScriptForProfile(profile) return super(ConfigProfileActivationCommands, cls).__new__(cls) @classmethod def _getScriptNameForProfile(cls, name): invalidChars = set() for c in name: if not c.isalnum() and c != "_": invalidChars.add(c) for c in invalidChars: name=name.replace(c, b16encode(c.encode()).decode("ascii")) return "profile_%s" % name @classmethod def _profileScript(cls, name): if gui.shouldConfigProfileTriggersBeSuspended(): # Translators: a message indicating that configuration profiles can't be activated using gestures, # due to profile activation being suspended. state = _("Can't change the active profile while an NVDA dialog is open") elif config.conf.profiles[-1].name == name: config.conf.manualActivateProfile(None) # Translators: a message when a configuration profile is manually deactivated. # {profile} is replaced with the profile's name. state = _("{profile} profile deactivated").format(profile=name) else: config.conf.manualActivateProfile(name) # Translators: a message when a configuration profile is manually activated. # {profile} is replaced with the profile's name. state = _("{profile} profile activated").format(profile=name) ui.message(state) @classmethod def addScriptForProfile(cls, name): """Adds a script for the given configuration profile. This method will not check a profile's existence. @param name: The name of the profile to add a script for. @type name: str """ script = lambda self, gesture: cls._profileScript(name) funcName = script.__name__ = "script_%s" % cls._getScriptNameForProfile(name) # Just set the doc string of the script, using the decorator is overkill here. # Translators: The description shown in input help for a script that # activates or deactivates a config profile. # {profile} is replaced with the profile's name. script.__doc__ = _("Activates or deactivates the {profile} configuration profile").format(profile=name) setattr(cls, funcName, script) @classmethod def removeScriptForProfile(cls, name): """Removes a script for the given configuration profile. @param name: The name of the profile to remove a script for. @type name: str """ scriptName = cls._getScriptNameForProfile(name) cls._moveGesturesForProfileActivationScript(scriptName) delattr(cls, "script_%s" % scriptName) @classmethod def _moveGesturesForProfileActivationScript(cls, oldScriptName, newScriptName=None): """Patches the user gesture map to reflect updates to profile scripts. @param oldScriptName: The current name of the profile activation script. @type oldScriptName: str @param newScriptName: The new name for the profile activation script, if any. if C{None}, the gestures are only removed for the current profile sript. 
@type newScriptName: str """ gestureMap = inputCore.manager.userGestureMap for scriptCls, gesture, scriptName in gestureMap.getScriptsForAllGestures(): if scriptName != oldScriptName: continue moduleName = scriptCls.__module__ className = scriptCls.__name__ gestureMap.remove(gesture, moduleName, className, scriptName) if newScriptName is not None: gestureMap.add(gesture, moduleName, className, newScriptName) try: gestureMap.save() except: log.debugWarning("Couldn't save user gesture map after renaming profile script", exc_info=True) @classmethod def updateScriptForRenamedProfile(cls, oldName, newName): """Removes a script for the oldName configuration profile, and adds a new script for newName. Existing gestures in the gesture map are moved from the oldName to the newName profile. @param oldName: The current name of the profile. @type oldName: str @param newName: The new name for the profile. @type newName: str """ oldScriptName = cls._getScriptNameForProfile(oldName) newScriptName = cls._getScriptNameForProfile(newName) cls._moveGesturesForProfileActivationScript(oldScriptName, newScriptName) delattr(cls, "script_%s" % oldScriptName) cls.addScriptForProfile(newName) #: The single instance for the configuration profile activation commands. #: @type: L{ConfigProfileActivationCommands} configProfileActivationCommands = ConfigProfileActivationCommands()
1
27,807
Feel free to remove this line
nvaccess-nvda
py
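The NVDA code above attaches its metadata (the translatable `__doc__`, a `category`, and optionally a `bypassInputHelp` flag) as plain attributes on each `script_*` function, while newer scripts such as `script_toggleReportCLDR` use the `@script` decorator for the same purpose. Below is a minimal standalone sketch of that attribute convention; the decorator and class names are hypothetical, not NVDA's actual implementation.

```python
# Hypothetical re-creation of the attribute convention; NVDA's own @script
# decorator (used above for script_toggleReportCLDR) does more than this.
def script(description=None, category=None, bypassInputHelp=False):
    def decorator(fn):
        fn.__doc__ = description            # shown in input help mode
        fn.category = category              # grouping in the input gestures dialog
        fn.bypassInputHelp = bypassInputHelp
        return fn
    return decorator

class ExampleCommands:
    @script(description="Example script that does nothing", category="Examples")
    def script_example(self, gesture):
        pass

assert ExampleCommands.script_example.category == "Examples"
```

Grouping the assignments in a single decorator call also avoids the kind of copy-paste slip corrected above, where a `bypassInputHelp` flag was set on the wrong function.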
@@ -25,7 +25,7 @@ module Subscriptions end def click_upcase_call_to_action_in_header - click_link "Upcase Membership" + click_link I18n.t("shared.subscriptions.single_user") end def settings_page
1
module Subscriptions def sign_in_as_user_with_subscription(*traits) @current_user = create( :subscriber, *traits, stripe_customer_id: FakeStripe::CUSTOMER_ID, completed_welcome: true ) visit practice_path(as: @current_user) end def sign_in_as_user_with_downgraded_subscription sign_in_as_user_with_subscription @current_user.subscription.change_plan(sku: create(:basic_plan).sku) end def sign_in_as_user_with_subscription_that_is_eligible_for_annual_upgrade sign_in_as_user_with_subscription Plan.first.update_attributes! annual_plan: create(:plan, :annual) visit practice_path(as: @current_user) end def click_landing_page_call_to_action click_link I18n.t("subscriptions.join_cta") end def click_upcase_call_to_action_in_header click_link "Upcase Membership" end def settings_page my_account_link.click page end def have_subscription_to(plan_name) have_css(".subscription", text: plan_name) end end
1
16,574
~~Should this match the key updated above?~~ :+1:
thoughtbot-upcase
rb
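The review exchange above hinges on whether the `I18n.t` key introduced by the patch matches the key defined in the app's locale files. Here is a hedged Ruby sketch of how the lookup resolves, assuming a hypothetical locale entry; the real translation lives in the Upcase repo's `config/locales`.

```ruby
require "i18n"

# Hypothetical locale entry standing in for the app's config/locales files.
I18n.backend.store_translations(
  :en,
  shared: { subscriptions: { single_user: "Upcase Membership" } }
)

# The lookup used in the patched helper:
I18n.t("shared.subscriptions.single_user") # => "Upcase Membership"
```

If the key and the locale file drift apart, the helper would try to click a missing-translation placeholder instead of the real link text, which is what the reviewer's (struck-through) question was probing.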
@@ -180,7 +180,9 @@ func isDirExcludedByFile(dir, tagFilename, header string) bool { Warnf("could not open exclusion tagfile: %v", err) return false } - defer f.Close() + defer func() { + _ = f.Close() + }() buf := make([]byte, len(header)) _, err = io.ReadFull(f, buf) // EOF is handled with a dedicated message, otherwise the warning were too cryptic
1
package main import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "strings" "sync" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" ) type rejectionCache struct { m map[string]bool mtx sync.Mutex } // Lock locks the mutex in rc. func (rc *rejectionCache) Lock() { if rc != nil { rc.mtx.Lock() } } // Unlock unlocks the mutex in rc. func (rc *rejectionCache) Unlock() { if rc != nil { rc.mtx.Unlock() } } // Get returns the last stored value for dir and a second boolean that // indicates whether that value was actually written to the cache. It is the // callers responsibility to call rc.Lock and rc.Unlock before using this // method, otherwise data races may occur. func (rc *rejectionCache) Get(dir string) (bool, bool) { if rc == nil || rc.m == nil { return false, false } v, ok := rc.m[dir] return v, ok } // Store stores a new value for dir. It is the callers responsibility to call // rc.Lock and rc.Unlock before using this method, otherwise data races may // occur. func (rc *rejectionCache) Store(dir string, rejected bool) { if rc == nil { return } if rc.m == nil { rc.m = make(map[string]bool) } rc.m[dir] = rejected } // RejectByNameFunc is a function that takes a filename of a // file that would be included in the backup. The function returns true if it // should be excluded (rejected) from the backup. type RejectByNameFunc func(path string) bool // RejectFunc is a function that takes a filename and os.FileInfo of a // file that would be included in the backup. The function returns true if it // should be excluded (rejected) from the backup. type RejectFunc func(path string, fi os.FileInfo) bool // rejectByPattern returns a RejectByNameFunc which rejects files that match // one of the patterns. func rejectByPattern(patterns []string) RejectByNameFunc { parsedPatterns := filter.ParsePatterns(patterns) return func(item string) bool { matched, err := filter.List(parsedPatterns, item) if err != nil { Warnf("error for exclude pattern: %v", err) } if matched { debug.Log("path %q excluded by an exclude pattern", item) return true } return false } } // Same as `rejectByPattern` but case insensitive. func rejectByInsensitivePattern(patterns []string) RejectByNameFunc { for index, path := range patterns { patterns[index] = strings.ToLower(path) } rejFunc := rejectByPattern(patterns) return func(item string) bool { return rejFunc(strings.ToLower(item)) } } // rejectIfPresent returns a RejectByNameFunc which itself returns whether a path // should be excluded. The RejectByNameFunc considers a file to be excluded when // it resides in a directory with an exclusion file, that is specified by // excludeFileSpec in the form "filename[:content]". The returned error is // non-nil if the filename component of excludeFileSpec is empty. If rc is // non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation // of a directory based on previous visits. 
func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) { if excludeFileSpec == "" { return nil, errors.New("name for exclusion tagfile is empty") } colon := strings.Index(excludeFileSpec, ":") if colon == 0 { return nil, fmt.Errorf("no name for exclusion tagfile provided") } tf, tc := "", "" if colon > 0 { tf = excludeFileSpec[:colon] tc = excludeFileSpec[colon+1:] } else { tf = excludeFileSpec } debug.Log("using %q as exclusion tagfile", tf) rc := &rejectionCache{} fn := func(filename string) bool { return isExcludedByFile(filename, tf, tc, rc) } return fn, nil } // isExcludedByFile interprets filename as a path and returns true if that file // is in an excluded directory. A directory is identified as excluded if it contains a // tagfile which bears the name specified in tagFilename and starts with // header. If rc is non-nil, it is used to expedite the evaluation of a // directory based on previous visits. func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool { if tagFilename == "" { return false } dir, base := filepath.Split(filename) if base == tagFilename { return false // do not exclude the tagfile itself } rc.Lock() defer rc.Unlock() rejected, visited := rc.Get(dir) if visited { return rejected } rejected = isDirExcludedByFile(dir, tagFilename, header) rc.Store(dir, rejected) return rejected } func isDirExcludedByFile(dir, tagFilename, header string) bool { tf := filepath.Join(dir, tagFilename) _, err := fs.Lstat(tf) if os.IsNotExist(err) { return false } if err != nil { Warnf("could not access exclusion tagfile: %v", err) return false } // when no signature is given, the mere presence of tf is enough reason // to exclude filename if len(header) == 0 { return true } // From this stage, errors mean tagFilename exists but it is malformed. // Warnings will be generated so that the user is informed that the // intended ignore-action is not performed. f, err := os.Open(tf) if err != nil { Warnf("could not open exclusion tagfile: %v", err) return false } defer f.Close() buf := make([]byte, len(header)) _, err = io.ReadFull(f, buf) // EOF is handled with a dedicated message, otherwise the warning were too cryptic if err == io.EOF { Warnf("invalid (too short) signature in exclusion tagfile %q\n", tf) return false } if err != nil { Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err) return false } if !bytes.Equal(buf, []byte(header)) { Warnf("invalid signature in exclusion tagfile %q\n", tf) return false } return true } // DeviceMap is used to track allowed source devices for backup. This is used to // check for crossing mount points during backup (for --one-file-system). It // maps the name of a source path to its device ID. type DeviceMap map[string]uint64 // NewDeviceMap creates a new device map from the list of source paths. func NewDeviceMap(allowedSourcePaths []string) (DeviceMap, error) { deviceMap := make(map[string]uint64) for _, item := range allowedSourcePaths { item, err := filepath.Abs(filepath.Clean(item)) if err != nil { return nil, err } fi, err := fs.Lstat(item) if err != nil { return nil, err } id, err := fs.DeviceID(fi) if err != nil { return nil, err } deviceMap[item] = id } if len(deviceMap) == 0 { return nil, errors.New("zero allowed devices") } return deviceMap, nil } // IsAllowed returns true if the path is located on an allowed device. 
func (m DeviceMap) IsAllowed(item string, deviceID uint64) (bool, error) { for dir := item; ; dir = filepath.Dir(dir) { debug.Log("item %v, test dir %v", item, dir) // find a parent directory that is on an allowed device (otherwise // we would not traverse the directory at all) allowedID, ok := m[dir] if !ok { if dir == filepath.Dir(dir) { // arrived at root, no allowed device found. this should not happen. break } continue } // if the item has a different device ID than the parent directory, // we crossed a file system boundary if allowedID != deviceID { debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID) return false, nil } // item is on allowed device, accept it debug.Log("item %v allowed", item) return true, nil } return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m) } // rejectByDevice returns a RejectFunc that rejects files which are on a // different file systems than the files/dirs in samples. func rejectByDevice(samples []string) (RejectFunc, error) { deviceMap, err := NewDeviceMap(samples) if err != nil { return nil, err } debug.Log("allowed devices: %v\n", deviceMap) return func(item string, fi os.FileInfo) bool { id, err := fs.DeviceID(fi) if err != nil { // This should never happen because gatherDevices() would have // errored out earlier. If it still does that's a reason to panic. panic(err) } allowed, err := deviceMap.IsAllowed(filepath.Clean(item), id) if err != nil { // this should not happen panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) } if allowed { // accept item return false } // reject everything except directories if !fi.IsDir() { return true } // special case: make sure we keep mountpoints (directories which // contain a mounted file system). Test this by checking if the parent // directory would be included. parentDir := filepath.Dir(filepath.Clean(item)) parentFI, err := fs.Lstat(parentDir) if err != nil { debug.Log("item %v: error running lstat() on parent directory: %v", item, err) // if in doubt, reject return true } parentDeviceID, err := fs.DeviceID(parentFI) if err != nil { debug.Log("item %v: getting device ID of parent directory: %v", item, err) // if in doubt, reject return true } parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID) if err != nil { debug.Log("item %v: error checking parent directory: %v", item, err) // if in doubt, reject return true } if parentAllowed { // we found a mount point, so accept the directory return false } // reject everything else return true }, nil } // rejectResticCache returns a RejectByNameFunc that rejects the restic cache // directory (if set). 
func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { if repo.Cache == nil { return func(string) bool { return false }, nil } cacheBase := repo.Cache.BaseDir() if cacheBase == "" { return nil, errors.New("cacheBase is empty string") } return func(item string) bool { if fs.HasPathPrefix(cacheBase, item) { debug.Log("rejecting restic cache directory %v", item) return true } return false }, nil } func rejectBySize(maxSizeStr string) (RejectFunc, error) { maxSize, err := parseSizeStr(maxSizeStr) if err != nil { return nil, err } return func(item string, fi os.FileInfo) bool { // directory will be ignored if fi.IsDir() { return false } filesize := fi.Size() if filesize > maxSize { debug.Log("file %s is oversize: %d", item, filesize) return true } return false }, nil } func parseSizeStr(sizeStr string) (int64, error) { if sizeStr == "" { return 0, errors.New("expected size, got empty string") } numStr := sizeStr[:len(sizeStr)-1] var unit int64 = 1 switch sizeStr[len(sizeStr)-1] { case 'b', 'B': // use initialized values, do nothing here case 'k', 'K': unit = 1024 case 'm', 'M': unit = 1024 * 1024 case 'g', 'G': unit = 1024 * 1024 * 1024 case 't', 'T': unit = 1024 * 1024 * 1024 * 1024 default: numStr = sizeStr } value, err := strconv.ParseInt(numStr, 10, 64) if err != nil { return 0, err } return value * unit, nil }
1
14,940
`gofmt` allows a more compact form `defer func() { _ = f.Close() }()` or just add `// nolint:errcheck` before `defer`.
restic-restic
go
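The reviewer's note above is purely stylistic: the three-line deferred close added by the patch can be collapsed into a one-line function literal, which `gofmt` leaves intact. A small sketch follows, using a hypothetical `readTagfile` helper rather than restic's actual code.

```go
package main

import "os"

// readTagfile demonstrates the two equivalent ways of discarding the
// error returned by Close in a deferred call.
func readTagfile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// Form used in the patch:
	//   defer func() {
	//       _ = f.Close()
	//   }()
	// Compact form suggested in the review; gofmt keeps it on one line:
	defer func() { _ = f.Close() }()
	// ... read and validate the tagfile signature here ...
	return nil
}

func main() { _ = readTagfile("exclusion-tagfile") }
```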
@@ -10,11 +10,10 @@ import ( "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/types" ) // MinerCreate runs the `miner create` command against the filecoin process -func (f *Filecoin) MinerCreate(ctx context.Context, pledge uint64, collateral *types.AttoFIL, options ...ActionOption) (address.Address, error) { +func (f *Filecoin) MinerCreate(ctx context.Context, pledge uint64, collateral *big.Int, options ...ActionOption) (address.Address, error) { var out address.Address sPledge := fmt.Sprintf("%d", pledge)
1
package fat import ( "context" "fmt" "math/big" cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" "gx/ipfs/QmY5Grm8pJdiSSVsYxx4uNRgweY72EmYwuSDbRnbFok3iY/go-libp2p-peer" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/types" ) // MinerCreate runs the `miner create` command against the filecoin process func (f *Filecoin) MinerCreate(ctx context.Context, pledge uint64, collateral *types.AttoFIL, options ...ActionOption) (address.Address, error) { var out address.Address sPledge := fmt.Sprintf("%d", pledge) sCollateral := collateral.String() args := []string{"go-filecoin", "miner", "create"} for _, option := range options { args = append(args, option()...) } args = append(args, sPledge, sCollateral) if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { return address.Address{}, err } return out, nil } // MinerUpdatePeerid runs the `miner update-peerid` command against the filecoin process func (f *Filecoin) MinerUpdatePeerid(ctx context.Context, minerAddr address.Address, pid peer.ID, options ...ActionOption) (cid.Cid, error) { var out cid.Cid args := []string{"go-filecoin", "miner", "update-peerid"} for _, option := range options { args = append(args, option()...) } args = append(args, minerAddr.String(), pid.Pretty()) if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { return cid.Undef, err } return out, nil } // MinerAddAsk runs the `miner add-ask` command against the filecoin process func (f *Filecoin) MinerAddAsk(ctx context.Context, minerAddr address.Address, fil *big.Float, expiry big.Int, options ...ActionOption) (cid.Cid, error) { var out cid.Cid sMinerAddr := minerAddr.String() sExpiry := expiry.String() sFil := fil.String() args := []string{"go-filecoin", "miner", "add-ask"} for _, option := range options { args = append(args, option()...) 
} args = append(args, sMinerAddr, sFil, sExpiry) if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { return cid.Undef, err } return out, nil } // MinerOwner runs the `miner owner` command against the filecoin process func (f *Filecoin) MinerOwner(ctx context.Context, minerAddr address.Address) (address.Address, error) { var out address.Address sMinerAddr := minerAddr.String() if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "miner", "owner", sMinerAddr); err != nil { return address.Address{}, err } return out, nil } // MinerPledge runs the `miner pledge` command against the filecoin process func (f *Filecoin) MinerPledge(ctx context.Context, minerAddr address.Address) (*big.Int, error) { var out big.Int sMinerAddr := minerAddr.String() if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "miner", "pledge", sMinerAddr); err != nil { return big.NewInt(0), err } return &out, nil } // MinerPower runs the `miner power` command against the filecoin process func (f *Filecoin) MinerPower(ctx context.Context, minerAddr address.Address) (*big.Int, error) { var out big.Int sMinerAddr := minerAddr.String() if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "miner", "power", sMinerAddr); err != nil { return big.NewInt(0), err } return &out, nil } // MinerSetPrice runs the `miner set-price` command against the filecoin process func (f *Filecoin) MinerSetPrice(ctx context.Context, fil *big.Float, expiry *big.Int, options ...ActionOption) (*porcelain.MinerSetPriceResponse, error) { var out porcelain.MinerSetPriceResponse sExpiry := expiry.String() sFil := fil.String() args := []string{"go-filecoin", "miner", "set-price"} for _, option := range options { args = append(args, option()...) } args = append(args, sFil, sExpiry) if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { return nil, err } return &out, nil }
1
16,526
collateral is in FIL
filecoin-project-venus
go
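The terse comment above is about units: the `miner create` argument is denominated in FIL, while AttoFIL is the base unit (1 FIL = 10^18 attoFIL), so replacing `*types.AttoFIL` with a bare `*big.Int` changes which unit the caller is expected to pass. A hedged sketch of the conversion, for illustration only and not go-filecoin code:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Collateral expressed in FIL, as the command-line argument expects.
	fil := big.NewInt(100)

	// 1 FIL = 10^18 attoFIL; "atto" is the SI prefix for 10^-18.
	attoPerFIL := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
	atto := new(big.Int).Mul(fil, attoPerFIL)

	fmt.Printf("%s FIL = %s attoFIL\n", fil, atto)
}
```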
@@ -84,6 +84,11 @@ func (q *ChannelEventQueue) dispatchMessage() { log.LOGGER.Warnf("node id is not found in the message") continue } + _, ok := q.channelPool.Load(nodeID) + if !ok { + rChannel := make(chan model.Event, rChanBufSize) + q.channelPool.LoadOrStore(nodeID, rChannel) + } rChannel, err := q.getRChannel(nodeID) if err != nil { log.LOGGER.Infof("fail to get dispatch channel for %s", nodeID)
1
package channelq import ( "fmt" "strings" "sync" "github.com/kubeedge/beehive/pkg/common/log" "github.com/kubeedge/beehive/pkg/core/context" "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model" ) // Read channel buffer size const ( rChanBufSize = 10 ) // EventSet holds a set of events type EventSet interface { Ack() error Get() (*model.Event, error) } // ChannelEventSet is the channel implementation of EventSet type ChannelEventSet struct { current model.Event messages <-chan model.Event } // NewChannelEventSet initializes a new ChannelEventSet instance func NewChannelEventSet(messages <-chan model.Event) *ChannelEventSet { return &ChannelEventSet{messages: messages} } // Ack acknowledges once the event is processed func (s *ChannelEventSet) Ack() error { return nil } // Get obtains one event from the queue func (s *ChannelEventSet) Get() (*model.Event, error) { var ok bool s.current, ok = <-s.messages if !ok { return nil, fmt.Errorf("failed to get message from cluster, reason: channel is closed") } return &s.current, nil } // ChannelEventQueue is the channel implementation of EventQueue type ChannelEventQueue struct { ctx *context.Context channelPool sync.Map } // NewChannelEventQueue initializes a new ChannelEventQueue func NewChannelEventQueue(ctx *context.Context) (*ChannelEventQueue, error) { q := ChannelEventQueue{ctx: ctx} go q.dispatchMessage() return &q, nil } // dispatchMessage gets the message from the cloud , extracts the // node id from it , gets the channel associated with the node // and pushes the event on the channel func (q *ChannelEventQueue) dispatchMessage() { for { msg, err := q.ctx.Receive("cloudhub") if err != nil { log.LOGGER.Infof("receive not Message format message") continue } resource := msg.Router.Resource tokens := strings.Split(resource, "/") numOfTokens := len(tokens) var nodeID string for i, token := range tokens { if token == "node" && i+1 < numOfTokens { nodeID = tokens[i+1] break } } if nodeID == "" { log.LOGGER.Warnf("node id is not found in the message") continue } rChannel, err := q.getRChannel(nodeID) if err != nil { log.LOGGER.Infof("fail to get dispatch channel for %s", nodeID) continue } event := model.MessageToEvent(&msg) select { case rChannel <- event: default: } } } func (q *ChannelEventQueue) getRChannel(nodeID string) (chan model.Event, error) { channels, ok := q.channelPool.Load(nodeID) if !ok { log.LOGGER.Errorf("rChannel for edge node %s is removed", nodeID) return nil, fmt.Errorf("rChannel not found") } rChannel := channels.(chan model.Event) return rChannel, nil } // Connect allocates rChannel for given project and group func (q *ChannelEventQueue) Connect(info *model.HubInfo) error { _, ok := q.channelPool.Load(info.NodeID) if ok { return fmt.Errorf("edge node %s is already connected", info.NodeID) } // allocate a new rchannel with default buffer size rChannel := make(chan model.Event, rChanBufSize) _, ok = q.channelPool.LoadOrStore(info.NodeID, rChannel) if ok { // rchannel is already allocated return fmt.Errorf("edge node %s is already connected", info.NodeID) } return nil } // Close closes rChannel for given project and group func (q *ChannelEventQueue) Close(info *model.HubInfo) error { channels, ok := q.channelPool.Load(info.NodeID) if !ok { log.LOGGER.Warnf("rChannel for edge node %s is already removed", info.NodeID) return nil } rChannel := channels.(chan model.Event) close(rChannel) q.channelPool.Delete(info.NodeID) return nil } // Publish sends message via the rchannel to Edge Controller func (q *ChannelEventQueue) 
Publish(info *model.HubInfo, event *model.Event) error { msg := model.EventToMessage(event) q.ctx.Send2Group("controller", msg) return nil } // Consume retrieves message from the rChannel for given project and group func (q *ChannelEventQueue) Consume(info *model.HubInfo) (EventSet, error) { rChannel, err := q.getRChannel(info.NodeID) if err != nil { return nil, err } return NewChannelEventSet((<-chan model.Event)(rChannel)), nil } // Workload returns the number of queue channels connected to queue func (q *ChannelEventQueue) Workload() (float64, error) { return 1, nil }
1
10,454
We should not create channels for nodes which are not connected.
kubeedge-kubeedge
go
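The objection above is that the patched `dispatchMessage` allocates an event channel for any node ID found in a message, even for a node that never went through `Connect`; channels should only exist for connected nodes, and events for unknown nodes should be dropped. Below is a standalone sketch of that guard, with hypothetical types rather than kubeedge's own.

```go
package main

import (
	"fmt"
	"sync"
)

type event struct{ payload string }

// dispatch only looks the channel up; it never creates one, so events for
// nodes that never connected are dropped instead of buffered forever.
func dispatch(pool *sync.Map, nodeID string, ev event) {
	ch, ok := pool.Load(nodeID)
	if !ok {
		fmt.Printf("node %s is not connected, dropping event\n", nodeID)
		return
	}
	select {
	case ch.(chan event) <- ev:
	default: // buffer full: drop rather than block the dispatcher
	}
}

func main() {
	var pool sync.Map
	pool.Store("edge-node-1", make(chan event, 10)) // created by Connect
	dispatch(&pool, "edge-node-1", event{"hello"})
	dispatch(&pool, "edge-node-2", event{"never connected"})
}
```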
@@ -40,6 +40,7 @@ class MongoCredentials { * @param {string} [options.username] The username used for authentication * @param {string} [options.password] The password used for authentication * @param {string} [options.source] The database that the user should authenticate against + * @param {string} [options.db] see options.source * @param {string} [options.mechanism] The method used to authenticate * @param {object} [options.mechanismProperties] Special properties used by some types of auth mechanisms */
1
'use strict'; // Resolves the default auth mechanism according to // https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst function getDefaultAuthMechanism(ismaster) { if (ismaster) { // If ismaster contains saslSupportedMechs, use scram-sha-256 // if it is available, else scram-sha-1 if (Array.isArray(ismaster.saslSupportedMechs)) { return ismaster.saslSupportedMechs.indexOf('SCRAM-SHA-256') >= 0 ? 'scram-sha-256' : 'scram-sha-1'; } // Fallback to legacy selection method. If wire version >= 3, use scram-sha-1 if (ismaster.maxWireVersion >= 3) { return 'scram-sha-1'; } } // Default for wireprotocol < 3 return 'mongocr'; } /** * A representation of the credentials used by MongoDB * * @class * @property {string} mechanism The method used to authenticate * @property {string} [username] The username used for authentication * @property {string} [password] The password used for authentication * @property {string} [source] The database that the user should authenticate against * @property {object} [mechanismProperties] Special properties used by some types of auth mechanisms */ class MongoCredentials { /** * Creates a new MongoCredentials object * * @param {object} [options] * @param {string} [options.username] The username used for authentication * @param {string} [options.password] The password used for authentication * @param {string} [options.source] The database that the user should authenticate against * @param {string} [options.mechanism] The method used to authenticate * @param {object} [options.mechanismProperties] Special properties used by some types of auth mechanisms */ constructor(options) { options = options || {}; this.username = options.username; this.password = options.password; this.source = options.source || options.db; this.mechanism = options.mechanism || 'default'; this.mechanismProperties = options.mechanismProperties || {}; if (this.mechanism.match(/MONGODB-AWS/i)) { if (this.username == null && process.env.AWS_ACCESS_KEY_ID) { this.username = process.env.AWS_ACCESS_KEY_ID; } if (this.password == null && process.env.AWS_SECRET_ACCESS_KEY) { this.password = process.env.AWS_SECRET_ACCESS_KEY; } if (this.mechanismProperties.AWS_SESSION_TOKEN == null && process.env.AWS_SESSION_TOKEN) { this.mechanismProperties.AWS_SESSION_TOKEN = process.env.AWS_SESSION_TOKEN; } } } /** * Determines if two MongoCredentials objects are equivalent * * @param {MongoCredentials} other another MongoCredentials object * @returns {boolean} true if the two objects are equal. */ equals(other) { return ( this.mechanism === other.mechanism && this.username === other.username && this.password === other.password && this.source === other.source ); } /** * If the authentication mechanism is set to "default", resolves the authMechanism * based on the server version and server supported sasl mechanisms. * * @param {object} [ismaster] An ismaster response from the server */ resolveAuthMechanism(ismaster) { // If the mechanism is not "default", then it does not need to be resolved if (this.mechanism.toLowerCase() === 'default') { this.mechanism = getDefaultAuthMechanism(ismaster); } } } module.exports = { MongoCredentials };
1
17,228
I think the docstring should say something like "Alias for the `source` option" or something similar.
mongodb-node-mongodb-native
js
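The suggestion above concerns only the wording of the new JSDoc line; functionally, `options.db` already acts as a fallback in the constructor (`this.source = options.source || options.db`). Here is a sketch of the phrasing the reviewer proposes, with a hypothetical helper standing in for the class; this is not the merged text.

```js
// Hypothetical sketch of the suggested JSDoc wording and the alias semantics.
/**
 * @param {object} [options]
 * @param {string} [options.source] The database that the user should authenticate against
 * @param {string} [options.db] An alias for the `source` option
 */
function makeCredentials(options = {}) {
  return { source: options.source || options.db };
}

console.log(makeCredentials({ db: 'admin' }).source); // => 'admin'
```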
@@ -129,6 +129,14 @@ public class Product implements Serializable { private String nutritionDataPer; @JsonProperty("no_nutrition_data") private String noNutritionData; + @JsonProperty("other_information_fr") + private String otherInformation; + @JsonProperty("conservation_conditions_fr") + private String conservationConditions; + @JsonProperty("recycling_instructions_to_discard_fr") + private String recyclingInstructionsToDiscard; + @JsonProperty("recycling_instructions_to_recycle_fr") + private String recyclingInstructionsToRecycle; @JsonProperty("warning") private String warning; @JsonProperty("customer_service")
1
package openfoodfacts.github.scrachx.openfood.models; import com.fasterxml.jackson.annotation.JsonAnyGetter; import com.fasterxml.jackson.annotation.JsonAnySetter; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.util.StdConverter; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.text.StringEscapeUtils; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; class ProductStringConverter extends StdConverter<String, String> { public String convert(String value) { return StringEscapeUtils.unescapeHtml4(value).replace("\\'", "'").replace("&quot", "'"); } } @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown = true) public class Product implements Serializable { private static final long serialVersionUID = 1L; @JsonProperty("image_small_url") private String imageSmallUrl; @JsonProperty("image_nutrition_url") private String imageNutritionUrl; @JsonProperty("image_front_url") private String imageFrontUrl; @JsonProperty("image_ingredients_url") private String imageIngredientsUrl; @JsonProperty("link") private String manufactureUrl; private String url; private String code; @JsonProperty("traces_tags") private List<String> tracesTags = new ArrayList<>(); @JsonProperty("ingredients_that_may_be_from_palm_oil_tags") private List<String> ingredientsThatMayBeFromPalmOilTags = new ArrayList<>(); @JsonProperty("additives_tags") private List<String> additivesTags = new ArrayList<>(); @JsonProperty("allergens_hierarchy") private List<String> allergensHierarchy = new ArrayList<>(); @JsonProperty("manufacturing_places") private String manufacturingPlaces; private Nutriments nutriments; @JsonProperty("ingredients_from_palm_oil_tags") private List<Object> ingredientsFromPalmOilTags = new ArrayList<>(); @JsonProperty("brands_tags") private List<String> brandsTags = new ArrayList<>(); private String traces; @JsonProperty("categories_tags") private List<String> categoriesTags; @JsonProperty("ingredients_text") @JsonDeserialize(converter = ProductStringConverter.class) private String ingredientsText; @JsonProperty("product_name") @JsonDeserialize(converter = ProductStringConverter.class) private String productName; @JsonProperty("generic_name") @JsonDeserialize(converter = ProductStringConverter.class) private String genericName; @JsonProperty("ingredients_from_or_that_may_be_from_palm_oil_n") private long ingredientsFromOrThatMayBeFromPalmOilN; @JsonProperty("serving_size") private String servingSize; @JsonProperty("last_modified_by") private String lastModifiedBy; @JsonProperty("allergens_tags") private List<String> allergensTags; private String allergens; private String origins; private String stores; @JsonProperty("nutrition_grade_fr") private String nutritionGradeFr; @JsonProperty("nutrient_levels") private NutrientLevels nutrientLevels; private String countries; @JsonProperty("countries_tags") private List<String> countriesTags; private String brands; private String packaging; @JsonProperty("labels_hierarchy") private List<String> labelsHierarchy; @JsonProperty("labels_tags") private List<String> labelsTags; @JsonProperty("cities_tags") private List<Object> citiesTags = new ArrayList<>(); private String quantity; @JsonProperty("ingredients_from_palm_oil_n") 
private long ingredientsFromPalmOilN; @JsonProperty("image_url") private String imageUrl; @JsonProperty("emb_codes_tags") private List<Object> embTags = new ArrayList<>(); @JsonProperty("states_tags") private List<String> statesTags = new ArrayList<>(); @JsonProperty("vitamins_tags") private List<String> vitaminTags = new ArrayList<>(); @JsonProperty("minerals_tags") private List<String> mineralTags = new ArrayList<>(); @JsonProperty("amino_acids_tags") private List<String> aminoAcidTags = new ArrayList<>(); @JsonProperty("other_nutritional_substances_tags") private List<String> otherNutritionTags = new ArrayList<>(); @JsonProperty("created_t") private String createdDateTime; @JsonProperty("creator") private String creator; @JsonProperty("last_modified_t") private String lastModifiedTime; @JsonProperty("editors_tags") private List<String> editorsTags = new ArrayList<>(); @JsonProperty("nova_groups") private String novaGroups; @JsonProperty("lang") private String lang; @JsonProperty("purchase_places") private String purchasePlaces; @JsonProperty("nutrition_data_per") private String nutritionDataPer; @JsonProperty("no_nutrition_data") private String noNutritionData; @JsonProperty("warning") private String warning; @JsonProperty("customer_service") private String customerService; private Map<String, Object> additionalProperties = new HashMap<>(); @JsonAnyGetter public Map<String, Object> getAdditionalProperties() { return this.additionalProperties; } @JsonAnySetter public void setAdditionalProperty(String name, Object value) { this.additionalProperties.put(name, value); } public String getProductName(String languageCode) { if (additionalProperties.get("product_name_" + languageCode) != null) { return additionalProperties.get("product_name_" + languageCode) .toString() .replace("\\'", "'") .replace("&quot", "'"); } return null; } public String getIngredientsText(String languageCode) { if (additionalProperties.get("ingredients_text_" + languageCode) != null) { return additionalProperties.get("ingredients_text_" + languageCode).toString(); } return null; } /** * @return The statesTags */ public List<String> getStatesTags() { return statesTags; } public String getLastModifiedBy() { return lastModifiedBy; } public String getCustomerService() { return customerService; } public String getWarning() { return warning; } /** * @return The vitaminTags */ public List<String> getVitaminTags() { return vitaminTags; } public void setVitaminTags(List<String> vitaminTags) { this.vitaminTags = vitaminTags; } /** * @return The mineralsTags */ public List<String> getMineralTags() { return mineralTags; } public void setMineralTags(List<String> mineralTags) { this.mineralTags = mineralTags; } /** * @return The aminoAcidTags */ public List<String> getAminoAcidTags() { return aminoAcidTags; } public void setAminoAcidTags(List<String> aminoAcidTags) { this.aminoAcidTags = aminoAcidTags; } /** * @return The otherNutritionTags */ public List<String> getOtherNutritionTags() { return otherNutritionTags; } public void setOtherNutritionTags(List<String> otherNutritionTags) { this.otherNutritionTags = otherNutritionTags; } /** * @return The imageSmallUrl */ public String getImageSmallUrl() { return imageSmallUrl; } /** * @return The imageFrontUrl */ public String getImageFrontUrl() { return imageFrontUrl; } /** * @return The imageIngredientsUrl */ public String getImageIngredientsUrl() { return imageIngredientsUrl; } /** * @return The imageNutritionUrl */ public String getImageNutritionUrl() { return imageNutritionUrl; } /** 
* @return The manufactureUrl */ public String getManufactureUrl() { return manufactureUrl; } /** * @return The url */ public String getUrl() { return url; } /** * @return The code */ public String getCode() { return code; } public void setCode(String code) { this.code = code; } /** * @return The tracesTags */ public List<String> getTracesTags() { return tracesTags; } /** * @return The ingredientsThatMayBeFromPalmOilTags */ public List<String> getIngredientsThatMayBeFromPalmOilTags() { return ingredientsThatMayBeFromPalmOilTags; } /** * @return The additivesTags */ public List<String> getAdditivesTags() { return additivesTags; } /** * @return The allergensHierarchy */ public List<String> getAllergensHierarchy() { return allergensHierarchy; } /** * @return The manufacturingPlaces */ public String getManufacturingPlaces() { return manufacturingPlaces; } /** * @return The nutriments */ public Nutriments getNutriments() { return nutriments; } /** * @return The ingredientsFromPalmOilTags */ public List<Object> getIngredientsFromPalmOilTags() { return ingredientsFromPalmOilTags; } /** * @return The brandsTags */ public List<String> getBrandsTags() { return brandsTags; } /** * @return The traces */ public String getTraces() { return traces; } /** * @return The categoriesTags */ public List<String> getCategoriesTags() { return categoriesTags; } /** * @return The ingredientsText */ public String getIngredientsText() { return ingredientsText; } /** * @return The productName */ public String getProductName() { return productName; } /** * @return The genericName */ public String getGenericName() { return genericName; } /** * @return The ingredientsFromOrThatMayBeFromPalmOilN */ public long getIngredientsFromOrThatMayBeFromPalmOilN() { return ingredientsFromOrThatMayBeFromPalmOilN; } /** * @return The servingSize */ public String getServingSize() { return servingSize; } public List<String> getAllergensTags() { return allergensTags; } /** * @return The allergens */ public String getAllergens() { return allergens; } /** * @return The origins */ public String getOrigins() { return origins; } /** * @return The stores */ public String getStores() { if (stores == null) return null; return stores.replace(",", ", "); } /** * @return The nutritionGradeFr */ public String getNutritionGradeFr() { return nutritionGradeFr; } /** * @return The nutrientLevels */ public NutrientLevels getNutrientLevels() { return nutrientLevels; } /** * @return The countries */ public String getCountries() { if (countries == null) return null; return countries.replace(",", ", "); } /** * @return The brands */ public String getBrands() { if (brands == null) return null; return brands.replace(",", ", "); } /** * @return The packaging */ public String getPackaging() { if (packaging == null) return null; return packaging.replace(",", ", "); } /** * @return The labels tags */ public List<String> getLabelsTags() { return labelsTags; } /** * @return The labels hierarchy */ public List<String> getLabelsHierarchy() { return labelsHierarchy; } /** * @return The citiesTags */ public List<Object> getCitiesTags() { return citiesTags; } /** * @return The quantity */ public String getQuantity() { return quantity; } /** * @return The ingredientsFromPalmOilN */ public long getIngredientsFromPalmOilN() { return ingredientsFromPalmOilN; } /** * @return The imageUrl */ public String getImageUrl() { return imageUrl; } /** * @return The Emb_codes */ public List<Object> getEmbTags() { return embTags; } public List<String> getCountriesTags() { return 
countriesTags; } public String getCreator() { return creator; } public String getCreatedDateTime() { return createdDateTime; } public String getLastModifiedTime() { return lastModifiedTime; } public List<String> getEditors() { return editorsTags; } public String getNovaGroups() { return novaGroups; } public String getLang() { return lang; } public String getPurchasePlaces() { return purchasePlaces; } public String getNutritionDataPer() { return nutritionDataPer; } public String getNoNutritionData() { return noNutritionData; } @Override public String toString() { return new ToStringBuilder(this) .append("code", code) .append("productName", productName) .toString(); } }
1
66,525
Please use properties without the `_fr` suffix so they can work across different languages, as @teolemon mentioned.
openfoodfacts-openfoodfacts-androidapp
java
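The request above is to avoid hard-wiring the `_fr` variants of the new fields. The `Product` class already shows the language-agnostic pattern in `getProductName(languageCode)`, which reads `product_name_<lang>` out of `additionalProperties`; below is a hedged sketch of applying the same idea to the new fields, using a hypothetical helper class rather than the merged code.

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative stand-in for the JSON catch-all map that Product keeps
// via @JsonAnySetter.
class LocalizedProductFields {
    private final Map<String, Object> additionalProperties = new HashMap<>();

    void setAdditionalProperty(String name, Object value) {
        additionalProperties.put(name, value);
    }

    // Reads "<baseKey>_<languageCode>" (e.g. "conservation_conditions_en")
    // instead of a field hard-wired to the French variant.
    String getLocalized(String baseKey, String languageCode) {
        Object value = additionalProperties.get(baseKey + "_" + languageCode);
        return value != null ? value.toString() : null;
    }
}
```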
@@ -132,6 +132,7 @@ struct st_h2o_http3client_req_t { static int handle_input_expect_data_frame(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc); static void start_request(struct st_h2o_http3client_req_t *req); +static void destroy_request(struct st_h2o_http3client_req_t *req); static struct st_h2o_http3client_conn_t *find_connection_for_origin(h2o_httpclient_ctx_t *ctx, const h2o_url_scheme_t *scheme, h2o_iovec_t authority)
1
/* * Copyright (c) 2018 Fastly, Kazuho Oku * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <assert.h> #include <errno.h> #include <stdlib.h> #include <sys/types.h> #include "quicly.h" #include "h2o/hostinfo.h" #include "h2o/httpclient.h" #include "h2o/http2_common.h" #include "h2o/http3_common.h" #include "h2o/http3_internal.h" /* Firefox-like tree is used to prioritize the requests: * * Root --+--(256)-- BLOCKING streams * | * +--( 1)-- NONCRITICAL --+--( 32)-- NORMAL streams * placeholder | * (0) +--( 16)-- NONBLOCKING streams * | * +--( 1)-- DELAYED streams */ #define H2O_HTTP3CLIENT_NONCRITICAL_PLACEHOLDER_ID 0 #define H2O_HTTP3CLIENT_NONCRITICAL_PLACEHOLDER_WEIGHT 1 #define H2O_HTTP3CLIENT_BLOCKING_STREAM_WEIGHT 256 #define H2O_HTTP3CLIENT_NORMAL_STREAM_WEIGHT 32 #define H2O_HTTP3CLIENT_NONBLOCKING_STREAM_WEIGHT 16 #define H2O_HTTP3CLIENT_DELAYED_STREAM_WEIGHT 1 #define H2O_HTTP3_ERROR_EOS H2O_HTTP3_ERROR_USER1 /* the client uses USER1 for signaling eos */ struct st_h2o_http3client_conn_t { h2o_http3_conn_t super; h2o_httpclient_ctx_t *ctx; struct { h2o_url_t origin_url; char named_serv[sizeof(H2O_UINT16_LONGEST_STR)]; } server; ptls_handshake_properties_t handshake_properties; h2o_timer_t timeout; h2o_hostinfo_getaddr_req_t *getaddr_req; /** * see h2o_http3_ctx_t::clients */ h2o_linklist_t clients_link; /** * linklist used to queue pending requests */ h2o_linklist_t pending_requests; }; struct st_h2o_http3client_req_t { /** * superclass */ h2o_httpclient_t super; /** * pointer to the connection */ struct st_h2o_http3client_conn_t *conn; /** * is NULL until connection is established */ quicly_stream_t *quic; /** * currently only used for pending_requests */ h2o_linklist_t link; /** * */ uint64_t bytes_left_in_data_frame; /** * */ h2o_buffer_t *sendbuf; /** * */ struct { /** * HTTP-level buffer that contains (part of) response body received. Is the variable registered as `h2o_httpclient::buf`. */ h2o_buffer_t *body; /** * QUIC stream-level buffer that contains bytes that have not yet been processed at the HTTP/3 framing decoding level. This * buffer may have gaps. The beginning offset of `partial_frame` is equal to `recvstate.data_off`. */ h2o_buffer_t *stream; /** * Retains the amount of stream-level data that was available in the previous call. This value is used to see if processing * of new stream data is necessary. 
*/ size_t prev_bytes_available; } recvbuf; /** * called when new contigious data becomes available */ int (*handle_input)(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc); /** * proceed_req callback. The callback is invoked when all bytes in the send buffer is emitted for the first time (at this point * bytes_written is changed to zero, so that the proceed_req function is called once per every block being supplied from the * application). */ struct { h2o_httpclient_proceed_req_cb cb; size_t bytes_written; } proceed_req; }; static int handle_input_expect_data_frame(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc); static void start_request(struct st_h2o_http3client_req_t *req); static struct st_h2o_http3client_conn_t *find_connection_for_origin(h2o_httpclient_ctx_t *ctx, const h2o_url_scheme_t *scheme, h2o_iovec_t authority) { h2o_linklist_t *l; /* FIXME: * - check connection state(e.g., max_concurrent_streams, if received GOAWAY) * - use hashmap */ for (l = ctx->http3->clients.next; l != &ctx->http3->clients; l = l->next) { struct st_h2o_http3client_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3client_conn_t, clients_link, l); if (conn->server.origin_url.scheme == scheme && h2o_memis(conn->server.origin_url.authority.base, conn->server.origin_url.authority.len, authority.base, authority.len)) return conn; } return NULL; } static void destroy_connection(struct st_h2o_http3client_conn_t *conn) { if (h2o_linklist_is_linked(&conn->clients_link)) h2o_linklist_unlink(&conn->clients_link); /* FIXME pending_requests */ if (conn->getaddr_req != NULL) h2o_hostinfo_getaddr_cancel(conn->getaddr_req); h2o_timer_unlink(&conn->timeout); free(conn->server.origin_url.host.base); free(conn->server.origin_url.authority.base); h2o_http3_dispose_conn(&conn->super); free(conn); } static void on_connect_timeout(h2o_timer_t *timeout) { struct st_h2o_http3client_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3client_conn_t, timeout, timeout); destroy_connection(conn); } static void start_connect(struct st_h2o_http3client_conn_t *conn, struct sockaddr *sa, socklen_t salen) { quicly_conn_t *qconn; int ret; assert(conn->super.super.quic == NULL); assert(conn->getaddr_req == NULL); assert(h2o_timer_is_linked(&conn->timeout)); assert(conn->timeout.cb == on_connect_timeout); /* create QUIC connection context and attach (TODO pass address token, transport params) */ if ((ret = quicly_connect(&qconn, conn->ctx->http3->quic, conn->server.origin_url.host.base, sa, NULL, &conn->ctx->http3->next_cid, ptls_iovec_init(NULL, 0), &conn->handshake_properties, NULL)) != 0) { conn->super.super.quic = NULL; /* just in case */ goto Fail; } ++conn->ctx->http3->next_cid.master_id; /* FIXME check overlap */ if ((ret = h2o_http3_setup(&conn->super, qconn)) != 0) goto Fail; h2o_quic_send(&conn->super.super); return; Fail: destroy_connection(conn); } static void on_getaddr(h2o_hostinfo_getaddr_req_t *getaddr_req, const char *errstr, struct addrinfo *res, void *_conn) { struct st_h2o_http3client_conn_t *conn = _conn; assert(getaddr_req == conn->getaddr_req); conn->getaddr_req = NULL; if (errstr != NULL) { /* TODO reconnect */ abort(); } struct addrinfo *selected = h2o_hostinfo_select_one(res); start_connect(conn, selected->ai_addr, selected->ai_addrlen); } static void handle_control_stream_frame(h2o_http3_conn_t *_conn, uint8_t type, const uint8_t *payload, size_t len) { struct 
st_h2o_http3client_conn_t *conn = (void *)_conn; int err; const char *err_desc = NULL; if (!h2o_http3_has_received_settings(&conn->super)) { if (type != H2O_HTTP3_FRAME_TYPE_SETTINGS) { err = H2O_HTTP3_ERROR_MISSING_SETTINGS; goto Fail; } if ((err = h2o_http3_handle_settings_frame(&conn->super, payload, len, &err_desc)) != 0) goto Fail; assert(h2o_http3_has_received_settings(&conn->super)); /* issue requests */ while (!h2o_linklist_is_empty(&conn->pending_requests)) { struct st_h2o_http3client_req_t *req = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3client_req_t, link, conn->pending_requests.next); h2o_linklist_unlink(&req->link); start_request(req); } } else { switch (type) { case H2O_HTTP3_FRAME_TYPE_SETTINGS: err = H2O_HTTP3_ERROR_FRAME_UNEXPECTED; err_desc = "unexpected SETTINGS frame"; goto Fail; default: break; } } return; Fail: h2o_quic_close_connection(&conn->super.super, err, err_desc); } struct st_h2o_http3client_conn_t *create_connection(h2o_httpclient_ctx_t *ctx, h2o_url_t *origin) { static const h2o_http3_conn_callbacks_t callbacks = {{(void *)destroy_connection}, handle_control_stream_frame}; struct st_h2o_http3client_conn_t *conn = h2o_mem_alloc(sizeof(*conn)); h2o_http3_init_conn(&conn->super, ctx->http3, &callbacks); memset((char *)conn + sizeof(conn->super), 0, sizeof(*conn) - sizeof(conn->super)); conn->ctx = ctx; conn->server.origin_url = (h2o_url_t){origin->scheme, h2o_strdup(NULL, origin->authority.base, origin->authority.len), h2o_strdup(NULL, origin->host.base, origin->host.len)}; sprintf(conn->server.named_serv, "%" PRIu16, h2o_url_get_port(origin)); conn->handshake_properties.client.negotiated_protocols.list = h2o_http3_alpn; conn->handshake_properties.client.negotiated_protocols.count = sizeof(h2o_http3_alpn) / sizeof(h2o_http3_alpn[0]); h2o_linklist_insert(&ctx->http3->clients, &conn->clients_link); h2o_linklist_init_anchor(&conn->pending_requests); conn->getaddr_req = h2o_hostinfo_getaddr(conn->ctx->getaddr_receiver, conn->server.origin_url.host, h2o_iovec_init(conn->server.named_serv, strlen(conn->server.named_serv)), AF_UNSPEC, SOCK_DGRAM, IPPROTO_UDP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, conn); h2o_timer_link(conn->ctx->loop, conn->ctx->connect_timeout, &conn->timeout); conn->timeout.cb = on_connect_timeout; return conn; } static void destroy_request(struct st_h2o_http3client_req_t *req) { assert(req->quic == NULL); h2o_buffer_dispose(&req->sendbuf); h2o_buffer_dispose(&req->recvbuf.body); h2o_buffer_dispose(&req->recvbuf.stream); if (h2o_timer_is_linked(&req->super._timeout)) h2o_timer_unlink(&req->super._timeout); if (h2o_linklist_is_linked(&req->link)) h2o_linklist_unlink(&req->link); free(req); } static void detach_stream(struct st_h2o_http3client_req_t *req) { req->quic->callbacks = &quicly_stream_noop_callbacks; req->quic->data = NULL; req->quic = NULL; } static void close_stream(struct st_h2o_http3client_req_t *req, int err) { /* TODO are we expected to send two error codes? 
*/ if (!quicly_sendstate_transfer_complete(&req->quic->sendstate)) quicly_reset_stream(req->quic, err); if (!quicly_recvstate_transfer_complete(&req->quic->recvstate)) quicly_request_stop(req->quic, err); detach_stream(req); } static void on_error_before_head(struct st_h2o_http3client_req_t *req, const char *errstr) { req->super._cb.on_head(&req->super, errstr, 0, 0, h2o_iovec_init(NULL, 0), NULL, 0, 0); } static int handle_input_data_payload(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc) { const char *errstr; /* save data, update states */ if (req->bytes_left_in_data_frame != 0) { size_t payload_bytes = req->bytes_left_in_data_frame; if (src_end - *src < payload_bytes) payload_bytes = src_end - *src; h2o_buffer_append(&req->recvbuf.body, *src, payload_bytes); *src += payload_bytes; req->bytes_left_in_data_frame -= payload_bytes; } if (req->bytes_left_in_data_frame == 0) req->handle_input = handle_input_expect_data_frame; /* call the handler */ errstr = NULL; if (*src == src_end && err != 0) { /* FIXME also check content-length? see what other protocol handlers do */ errstr = err == H2O_HTTP3_ERROR_EOS && req->bytes_left_in_data_frame == 0 ? h2o_httpclient_error_is_eos : "reset by peer"; } else { errstr = NULL; } if (req->super._cb.on_body(&req->super, errstr) != 0) return H2O_HTTP3_ERROR_INTERNAL; return 0; } int handle_input_expect_data_frame(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc) { if (*src == src_end && err == H2O_HTTP3_ERROR_EOS) { /* if the input is EOS, delegate the task to the payload processing function */ assert(req->bytes_left_in_data_frame == 0); } else { /* otherwise, read the frame */ h2o_http3_read_frame_t frame; int ret; if ((ret = h2o_http3_read_frame(&frame, 1, H2O_HTTP3_STREAM_TYPE_REQUEST, src, src_end, err_desc)) != 0) { /* incomplete */ if (ret == H2O_HTTP3_ERROR_INCOMPLETE && err == 0) return ret; req->super._cb.on_body(&req->super, "malformed frame"); return ret; } switch (frame.type) { case H2O_HTTP3_FRAME_TYPE_DATA: break; default: /* FIXME handle push_promise, trailers */ return 0; } req->bytes_left_in_data_frame = frame.length; } /* unexpected close of DATA frame is handled by handle_input_data_payload. We rely on the function to detect if the DATA frame * is closed right after the frame header */ req->handle_input = handle_input_data_payload; return handle_input_data_payload(req, src, src_end, err, err_desc); } static int handle_input_expect_headers(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc) { h2o_http3_read_frame_t frame; int status; h2o_headers_t headers = {NULL}; uint8_t header_ack[H2O_HPACK_ENCODE_INT_MAX_LENGTH]; size_t header_ack_len; int ret, frame_is_eos; /* read HEADERS frame */ if ((ret = h2o_http3_read_frame(&frame, 1, H2O_HTTP3_STREAM_TYPE_REQUEST, src, src_end, err_desc)) != 0) { if (ret == H2O_HTTP3_ERROR_INCOMPLETE) { if (err != 0) { on_error_before_head(req, err == H2O_HTTP3_ERROR_NONE ? "unexpected close" : "reset by peer"); return 0; } return ret; } on_error_before_head(req, "response header too large"); return H2O_HTTP3_ERROR_EXCESSIVE_LOAD; /* FIXME correct code? 
*/ } frame_is_eos = *src == src_end && err != 0; if (frame.type != H2O_HTTP3_FRAME_TYPE_HEADERS) { switch (frame.type) { case H2O_HTTP3_FRAME_TYPE_DATA: *err_desc = "received DATA frame before HEADERS"; return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; default: return 0; } } if ((ret = h2o_qpack_parse_response(req->super.pool, req->conn->super.qpack.dec, req->quic->stream_id, &status, &headers, header_ack, &header_ack_len, frame.payload, frame.length, err_desc)) != 0) { if (ret == H2O_HTTP2_ERROR_INCOMPLETE) { /* the request is blocked by the QPACK stream */ req->handle_input = NULL; /* FIXME */ return 0; } on_error_before_head(req, *err_desc != NULL ? *err_desc : "qpack error"); return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; /* FIXME */ } if (header_ack_len != 0) h2o_http3_send_qpack_header_ack(&req->conn->super, header_ack, header_ack_len); /* handle 1xx */ if (100 <= status && status <= 199) { if (status == 101) { on_error_before_head(req, "unexpected 101"); return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; } if (frame_is_eos) { on_error_before_head(req, err == H2O_HTTP3_ERROR_EOS ? "unexpected close" : "reset by peer"); return 0; } if (req->super.informational_cb != NULL && req->super.informational_cb(&req->super, 0, status, h2o_iovec_init(NULL, 0), headers.entries, headers.size) != 0) { return H2O_HTTP3_ERROR_INTERNAL; } return 0; } /* handle final response */ if ((req->super._cb.on_body = req->super._cb.on_head(&req->super, frame_is_eos ? h2o_httpclient_error_is_eos : NULL, 0x300, status, h2o_iovec_init(NULL, 0), headers.entries, headers.size, 0)) == NULL) return frame_is_eos ? 0 : H2O_HTTP3_ERROR_INTERNAL; /* handle body */ req->handle_input = handle_input_expect_data_frame; return 0; } static void handle_input_error(struct st_h2o_http3client_req_t *req, int err) { const uint8_t *src = NULL, *src_end = NULL; const char *err_desc = NULL; req->handle_input(req, &src, src_end, err, &err_desc); } static void on_stream_destroy(quicly_stream_t *qs, int err) { struct st_h2o_http3client_req_t *req; if ((req = qs->data) == NULL) return; handle_input_error(req, H2O_HTTP3_ERROR_TRANSPORT); detach_stream(req); destroy_request(req); } static void on_send_shift(quicly_stream_t *qs, size_t delta) { struct st_h2o_http3client_req_t *req = qs->data; assert(req != NULL); h2o_buffer_consume(&req->sendbuf, delta); } static void on_send_emit(quicly_stream_t *qs, size_t off, void *dst, size_t *len, int *wrote_all) { struct st_h2o_http3client_req_t *req = qs->data; if (*len >= req->sendbuf->size - off) { *len = req->sendbuf->size - off; *wrote_all = 1; } else { *wrote_all = 0; } memcpy(dst, req->sendbuf->bytes + off, *len); if (*wrote_all && req->proceed_req.bytes_written != 0) { size_t bytes_written = req->proceed_req.bytes_written; req->proceed_req.bytes_written = 0; req->proceed_req.cb(&req->super, bytes_written, quicly_sendstate_is_open(&req->quic->sendstate) ? H2O_SEND_STATE_IN_PROGRESS : H2O_SEND_STATE_FINAL); } } static void on_send_stop(quicly_stream_t *qs, int err) { struct st_h2o_http3client_req_t *req; if ((req = qs->data) != NULL) { handle_input_error(req, err); close_stream(req, H2O_HTTP3_ERROR_REQUEST_CANCELLED); destroy_request(req); } } static int on_receive_process_bytes(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, const char **err_desc) { int ret, is_eos = quicly_recvstate_transfer_complete(&req->quic->recvstate); assert(is_eos || *src != src_end); do { if ((ret = req->handle_input(req, src, src_end, is_eos ? 
H2O_HTTP3_ERROR_EOS : 0, err_desc)) != 0) { if (ret == H2O_HTTP3_ERROR_INCOMPLETE) ret = is_eos ? H2O_HTTP3_ERROR_FRAME : 0; break; } } while (*src != src_end); return ret; } static void on_receive(quicly_stream_t *qs, size_t off, const void *input, size_t len) { struct st_h2o_http3client_req_t *req = qs->data; size_t bytes_consumed; int err = 0; const char *err_desc = NULL; /* process the input, update stream-level receive buffer */ if (req->recvbuf.stream->size == 0 && off == 0) { /* fast path; process the input directly, save the remaining bytes */ const uint8_t *src = input; err = on_receive_process_bytes(req, &src, src + len, &err_desc); bytes_consumed = src - (const uint8_t *)input; if (bytes_consumed != len) h2o_buffer_append(&req->recvbuf.stream, src, len - bytes_consumed); } else { /* slow path; copy data to partial_frame */ size_t size_required = off + len; if (req->recvbuf.stream->size < size_required) { H2O_HTTP3_CHECK_SUCCESS(h2o_buffer_reserve(&req->recvbuf.stream, size_required).base != NULL); req->recvbuf.stream->size = size_required; } memcpy(req->recvbuf.stream->bytes + off, input, len); /* just return if no new data is available */ size_t bytes_available = quicly_recvstate_bytes_available(&req->quic->recvstate); if (req->recvbuf.prev_bytes_available == bytes_available) return; /* process the bytes that have not been processed, update stream-level buffer */ const uint8_t *src = (const uint8_t *)req->recvbuf.stream->bytes; err = on_receive_process_bytes(req, &src, (const uint8_t *)req->recvbuf.stream->bytes + bytes_available, &err_desc); bytes_consumed = src - (const uint8_t *)req->recvbuf.stream->bytes; h2o_buffer_consume(&req->recvbuf.stream, bytes_consumed); } /* update QUIC stream-level state */ if (bytes_consumed != 0) quicly_stream_sync_recvbuf(req->quic, bytes_consumed); req->recvbuf.prev_bytes_available = quicly_recvstate_bytes_available(&req->quic->recvstate); /* cleanup */ if (quicly_recvstate_transfer_complete(&req->quic->recvstate)) { if (!quicly_sendstate_transfer_complete(&req->quic->sendstate)) quicly_reset_stream(req->quic, H2O_HTTP3_ERROR_NONE); detach_stream(req); destroy_request(req); } else if (err != 0) { /* FIXME all the errors are reported at stream-level. Is that correct? 
*/ close_stream(req, err); destroy_request(req); } } static void on_receive_reset(quicly_stream_t *qs, int err) { struct st_h2o_http3client_req_t *req = qs->data; handle_input_error(req, err); close_stream(req, H2O_HTTP3_ERROR_REQUEST_CANCELLED); destroy_request(req); } static size_t emit_data(struct st_h2o_http3client_req_t *req, h2o_iovec_t payload) { size_t nbytes; { /* emit header */ uint8_t buf[9], *p = buf; *p++ = H2O_HTTP3_FRAME_TYPE_DATA; p = quicly_encodev(p, payload.len); nbytes = p - buf; h2o_buffer_append(&req->sendbuf, buf, nbytes); } /* emit payload */ h2o_buffer_append(&req->sendbuf, payload.base, payload.len); nbytes += payload.len; return nbytes; } void start_request(struct st_h2o_http3client_req_t *req) { h2o_iovec_t method; h2o_url_t url; const h2o_header_t *headers; size_t num_headers; h2o_iovec_t body; h2o_httpclient_properties_t props = {NULL}; int ret; assert(req->quic == NULL); assert(!h2o_linklist_is_linked(&req->link)); if ((req->super._cb.on_head = req->super._cb.on_connect(&req->super, NULL, &method, &url, &headers, &num_headers, &body, &req->proceed_req.cb, &props, &req->conn->server.origin_url)) == NULL) { destroy_request(req); return; } if ((ret = quicly_open_stream(req->conn->super.super.quic, &req->quic, 0)) != 0) { on_error_before_head(req, "failed to open stream"); destroy_request(req); return; } req->quic->data = req; /* send request (TODO optimize) */ h2o_byte_vector_t buf = {NULL}; h2o_http3_encode_frame(req->super.pool, &buf, H2O_HTTP3_FRAME_TYPE_HEADERS, { h2o_qpack_flatten_request(req->conn->super.qpack.enc, req->super.pool, req->quic->stream_id, NULL, &buf, method, url.scheme, url.authority, url.path, headers, num_headers); }); h2o_buffer_append(&req->sendbuf, buf.entries, buf.size); if (body.len != 0) { emit_data(req, body); if (req->proceed_req.cb != NULL) req->proceed_req.bytes_written = body.len; } if (req->proceed_req.cb == NULL) quicly_sendstate_shutdown(&req->quic->sendstate, req->sendbuf->size); quicly_stream_sync_sendbuf(req->quic, 1); req->handle_input = handle_input_expect_headers; } static void cancel_request(h2o_httpclient_t *_client) { struct st_h2o_http3client_req_t *req = (void *)_client; if (req->quic != NULL) close_stream(req, H2O_HTTP3_ERROR_REQUEST_CANCELLED); destroy_request(req); } static void do_update_window(h2o_httpclient_t *_client) { /* TODO Stop receiving data for the stream when `buf` grows to certain extent. Then, resume when this function is being called. 
*/ } static int do_write_req(h2o_httpclient_t *_client, h2o_iovec_t chunk, int is_end_stream) { struct st_h2o_http3client_req_t *req = (void *)_client; assert(req->quic != NULL && quicly_sendstate_is_open(&req->quic->sendstate)); assert(req->proceed_req.bytes_written == 0); emit_data(req, chunk); /* shutdown if we've written all request body */ if (is_end_stream) { assert(quicly_sendstate_is_open(&req->quic->sendstate)); quicly_sendstate_shutdown(&req->quic->sendstate, req->quic->sendstate.acked.ranges[0].end + req->sendbuf->size); } req->proceed_req.bytes_written = chunk.len; quicly_stream_sync_sendbuf(req->quic, 1); h2o_quic_schedule_timer(&req->conn->super.super); return 0; } void h2o_httpclient_connect_h3(h2o_httpclient_t **_client, h2o_mem_pool_t *pool, void *data, h2o_httpclient_ctx_t *ctx, h2o_url_t *target, h2o_httpclient_connect_cb cb) { struct st_h2o_http3client_conn_t *conn; struct st_h2o_http3client_req_t *req; if ((conn = find_connection_for_origin(ctx, target->scheme, target->authority)) == NULL) conn = create_connection(ctx, target); req = h2o_mem_alloc(sizeof(*req)); *req = (struct st_h2o_http3client_req_t){{pool, ctx, NULL, &req->recvbuf.body, data, NULL, {h2o_gettimeofday(ctx->loop)}, {0}, {0}, cancel_request, NULL /* steal_socket */, NULL /* get_socket */, do_update_window, do_write_req}, conn}; req->super._cb.on_connect = cb; h2o_buffer_init(&req->sendbuf, &h2o_socket_buffer_prototype); h2o_buffer_init(&req->recvbuf.body, &h2o_socket_buffer_prototype); h2o_buffer_init(&req->recvbuf.stream, &h2o_socket_buffer_prototype); if (h2o_http3_has_received_settings(&conn->super)) { start_request(req); h2o_quic_schedule_timer(&conn->super.super); } else { h2o_linklist_insert(&conn->pending_requests, &req->link); } } void h2o_httpclient_http3_notify_connection_update(h2o_quic_ctx_t *ctx, h2o_quic_conn_t *_conn) { struct st_h2o_http3client_conn_t *conn = (void *)_conn; if (h2o_timer_is_linked(&conn->timeout) && conn->timeout.cb == on_connect_timeout) { /* TODO check connection state? */ h2o_timer_unlink(&conn->timeout); } } static int stream_open_cb(quicly_stream_open_t *self, quicly_stream_t *qs) { if (quicly_stream_is_unidirectional(qs->stream_id)) { h2o_http3_on_create_unidirectional_stream(qs); } else { static const quicly_stream_callbacks_t callbacks = {on_stream_destroy, on_send_shift, on_send_emit, on_send_stop, on_receive, on_receive_reset}; assert(quicly_stream_is_client_initiated(qs->stream_id)); qs->callbacks = &callbacks; } return 0; } quicly_stream_open_t h2o_httpclient_http3_on_stream_open = {stream_open_cb};
1
14,440
The declaration here is `static`, but the definition at the bottom is non-static?
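For context, a minimal sketch of the mismatch being flagged, reusing the `destroy_request` name from the patch (the body here is hypothetical): C gives an identifier the internal linkage of its first visible declaration (C11 6.2.2p5), so a `static` declaration followed by a plain definition still compiles, but repeating `static` on the definition keeps the two in sync and avoids misreading.

```c
#include <stdio.h>

/* static forward declaration: destroy_request gets internal linkage */
static void destroy_request(int id);

/* definition without `static`: it inherits the internal linkage of the
 * prior declaration, so this compiles, but the omitted keyword is easy
 * to misread as an external-linkage definition */
void destroy_request(int id)
{
    printf("destroying request %d\n", id);
}

int main(void)
{
    destroy_request(42);
    return 0;
}
```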
h2o-h2o
c
@@ -65,6 +65,9 @@ type WriterOptions struct { // write in a single request, if supported. Larger objects will be split into // multiple requests. BufferSize int + // Content-MD5 which may be used as a message integrity check (MIC) + // https://tools.ietf.org/html/rfc1864 + ContentMD5 string // Metadata holds key/value strings to be associated with the blob. // Keys are guaranteed to be non-empty and lowercased. Metadata map[string]string
1
// Copyright 2018 The Go Cloud Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package driver defines a set of interfaces that the blob package uses to interact // with the underlying blob services. package driver import ( "context" "io" "time" ) // ErrorKind is a code to indicate the kind of failure. type ErrorKind int const ( // GenericError is the default ErrorKind. GenericError ErrorKind = iota // NotFound indicates that the referenced key does not exist. NotFound // NotImplemented indicates that the provider does not support this operation. NotImplemented ) // Error is an interface that may be implemented by an error returned by // a driver to indicate the kind of failure. If an error does not have the // Kind method, then it is assumed to be GenericError. type Error interface { error Kind() ErrorKind } // Reader reads an object from the blob. type Reader interface { io.ReadCloser // Attributes returns a subset of attributes about the blob. Attributes() ReaderAttributes // As allows providers to expose provider-specific types; // see Bucket.As for more details. As(interface{}) bool } // Writer writes an object to the blob. type Writer interface { io.WriteCloser } // WriterOptions controls behaviors of Writer. type WriterOptions struct { // BufferSize changes the default size in byte of the maximum part Writer can // write in a single request, if supported. Larger objects will be split into // multiple requests. BufferSize int // Metadata holds key/value strings to be associated with the blob. // Keys are guaranteed to be non-empty and lowercased. Metadata map[string]string // BeforeWrite is a callback that must be called exactly once before // any data is written, unless NewTypedWriter returns an error, in // which case it should not be called. // asFunc allows providers to expose provider-specific types; // see Bucket.As for more details. BeforeWrite func(asFunc func(interface{}) bool) error } // ReaderAttributes contains a subset of attributes about a blob that are // accessible from Reader. type ReaderAttributes struct { // ContentType is the MIME type of the blob object. It must not be empty. ContentType string // ModTime is the time the blob object was last modified. ModTime time.Time // Size is the size of the object in bytes. Size int64 } // Attributes contains attributes about a blob. type Attributes struct { // ContentType is the MIME type of the blob object. It must not be empty. ContentType string // Metadata holds key/value pairs associated with the blob. // Keys will be lowercased by the concrete type before being returned // to the user. If there are duplicate case-insensitive keys (e.g., // "foo" and "FOO"), only one value will be kept, and it is undefined // which one. Metadata map[string]string // ModTime is the time the blob object was last modified. ModTime time.Time // Size is the size of the object in bytes. Size int64 // AsFunc allows providers to expose provider-specific types; // see Bucket.As for more details. 
// If not set, no provider-specific types are supported. AsFunc func(interface{}) bool } // ListOptions sets options for listing objects in the bucket. // TODO(Issue #541): Add Delimiter. type ListOptions struct { // Prefix indicates that only results with the given prefix should be // returned. Prefix string // PageSize sets the maximum number of objects to be returned. // 0 means no maximum; driver implementations should choose a reasonable // max. PageSize int // PageToken may be filled in with the NextPageToken from a previous // ListPaged call. PageToken []byte // BeforeList is a callback that must be called exactly once during ListPaged, // before the underlying provider's list is executed. // asFunc allows providers to expose provider-specific types; // see Bucket.As for more details. BeforeList func(asFunc func(interface{}) bool) error } // ListObject represents a specific blob object returned from ListPaged. type ListObject struct { // Key is the key for this blob. Key string // ModTime is the time the blob object was last modified. ModTime time.Time // Size is the size of the object in bytes. Size int64 // AsFunc allows providers to expose provider-specific types; // see Bucket.As for more details. // If not set, no provider-specific types are supported. AsFunc func(interface{}) bool } // ListPage represents a page of results return from ListPaged. type ListPage struct { // Objects is the slice of objects found. If ListOptions.PageSize != 0, // it should have at most ListOptions.PageSize entries. Objects []*ListObject // NextPageToken should be left empty unless there are more objects // to return. The value may be returned as ListOptions.PageToken on a // subsequent ListPaged call, to fetch the next page of results. // It can be an arbitrary []byte; it need not be a valid key. NextPageToken []byte } // Bucket provides read, write and delete operations on objects within it on the // blob service. type Bucket interface { // As allows providers to expose provider-specific types. // // i will be a pointer to the type the user wants filled in. // As should either fill it in and return true, or return false. // // Mutable objects should be exposed as a pointer to the object; // i will therefore be a **. // // A provider should document the type(s) it support in package // comments, and add conformance tests verifying them. // // A sample implementation might look like this, for supporting foo.MyType: // mt, ok := i.(*foo.MyType) // if !ok { // return false // } // *i = foo.MyType{} // or, more likely, the existing value // return true // // See // https://github.com/google/go-cloud/blob/master/internal/docs/design.md#as // for more background. As(i interface{}) bool // Attributes returns attributes for the blob. If the specified object does // not exist, Attributes must return an error whose Kind method returns // NotFound. Attributes(ctx context.Context, key string) (Attributes, error) // ListPaged lists objects in the bucket, in lexicographical order by // UTF-8-encoded key, returning pages of objects at a time. // Providers are only required to be eventually consistent with respect // to recently written or deleted objects. That is to say, there is no // guarantee that an object that's been written will immediately be returned // from ListPaged. // opt is guaranteed to be non-nil. ListPaged(ctx context.Context, opt *ListOptions) (*ListPage, error) // NewRangeReader returns a Reader that reads part of an object, reading at // most length bytes starting at the given offset. 
If length is negative, it // will read until the end of the object. If the specified object does not // exist, NewRangeReader must return an error whose Kind method returns // NotFound. NewRangeReader(ctx context.Context, key string, offset, length int64) (Reader, error) // NewTypedWriter returns Writer that writes to an object associated with key. // // A new object will be created unless an object with this key already exists. // Otherwise any previous object with the same key will be replaced. // The object may not be available (and any previous object will remain) // until Close has been called. // // contentType sets the MIME type of the object to be written. It must not be // empty. // // The caller must call Close on the returned Writer when done writing. // // Implementations should abort an ongoing write if ctx is later canceled, // and do any necessary cleanup in Close. Close should then return ctx.Err(). NewTypedWriter(ctx context.Context, key string, contentType string, opt *WriterOptions) (Writer, error) // Delete deletes the object associated with key. If the specified object does // not exist, NewRangeReader must return an error whose Kind method // returns NotFound. Delete(ctx context.Context, key string) error // SignedURL returns a URL that can be used to GET the blob for the duration // specified in opts.Expiry. opts is guaranteed to be non-nil. // If not supported, return an error whose Kind method returns NotImplemented. SignedURL(ctx context.Context, key string, opts *SignedURLOptions) (string, error) } // SignedURLOptions sets options for SignedURL. type SignedURLOptions struct { // Expiry sets how long the returned URL is valid for. It is guaranteed to be > 0. Expiry time.Duration }
1
11,572
Hi @myml, thanks for the contribution! `blob` and `blob/driver` are both in the same module, so you shouldn't need to split this change across multiple Pull Requests. Also, I'd like to see it working, including the implementation for `s3blob` and `gcsblob` (these should be easy, just a pass-through to the provider) and `fileblob` (might be trickier...), and a test in `drivertest/drivertest.go`.
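To make the new field concrete, here is a minimal, self-contained sketch of computing the RFC 1864 value a caller would place in `ContentMD5` (the `WriterOptions` struct below is a stripped-down stand-in for the real driver type, not the actual API, and the provider pass-through is only indicated in a comment):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// WriterOptions mirrors only the field added in the patch above.
type WriterOptions struct {
	ContentMD5 string
}

// contentMD5 computes the RFC 1864 value for a payload: the base64
// encoding of its 128-bit MD5 digest.
func contentMD5(payload []byte) string {
	sum := md5.Sum(payload)
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	opts := WriterOptions{ContentMD5: contentMD5([]byte("hello, blob"))}
	// A driver would pass this value through verbatim to the provider,
	// which verifies the uploaded bytes against the digest.
	fmt.Println(opts.ContentMD5)
}
```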
google-go-cloud
go
@@ -610,11 +610,11 @@ func (i *Initializer) configureGatewayInterface(gatewayIface *interfacestore.Int gatewayIface.IPs = []net.IP{} if i.networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() { // Assign IP to gw as required by SpoofGuard. - if i.nodeConfig.NodeIPv4Addr != nil { + if i.nodeConfig.NodeTransportIPv4Addr != nil { i.nodeConfig.GatewayConfig.IPv4 = i.nodeConfig.NodeTransportIPv4Addr.IP gatewayIface.IPs = append(gatewayIface.IPs, i.nodeConfig.NodeTransportIPv4Addr.IP) } - if i.nodeConfig.NodeIPv6Addr != nil { + if i.nodeConfig.NodeTransportIPv6Addr != nil { i.nodeConfig.GatewayConfig.IPv6 = i.nodeConfig.NodeTransportIPv6Addr.IP gatewayIface.IPs = append(gatewayIface.IPs, i.nodeConfig.NodeTransportIPv6Addr.IP) }
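A minimal sketch (with hypothetical stand-in types) of the nil-dereference this hunk fixes: the old guards checked `NodeIPv4Addr`/`NodeIPv6Addr` but then dereferenced `NodeTransportIPv4Addr`/`NodeTransportIPv6Addr`, which can be nil when a dedicated transport interface lacks that address family, so each guard must test the same pointer it goes on to use.

```go
package main

import (
	"fmt"
	"net"
)

type nodeConfig struct {
	NodeIPv4Addr          *net.IPNet
	NodeTransportIPv4Addr *net.IPNet
}

func main() {
	cfg := nodeConfig{
		NodeIPv4Addr: &net.IPNet{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
		// NodeTransportIPv4Addr left nil, e.g. the transport interface has no IPv4.
	}
	// Buggy guard: checks NodeIPv4Addr but dereferences NodeTransportIPv4Addr.
	// if cfg.NodeIPv4Addr != nil { _ = cfg.NodeTransportIPv4Addr.IP } // would panic
	// Fixed guard tests the pointer that is actually dereferenced:
	if cfg.NodeTransportIPv4Addr != nil {
		fmt.Println(cfg.NodeTransportIPv4Addr.IP)
	} else {
		fmt.Println("no transport IPv4 address")
	}
}
```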
1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package agent import ( "context" "encoding/json" "fmt" "net" "os" "strconv" "strings" "sync" "time" "github.com/containernetworking/plugins/pkg/ip" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" "antrea.io/antrea/pkg/agent/cniserver" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/controller/noderoute" "antrea.io/antrea/pkg/agent/interfacestore" "antrea.io/antrea/pkg/agent/openflow" "antrea.io/antrea/pkg/agent/openflow/cookie" "antrea.io/antrea/pkg/agent/route" "antrea.io/antrea/pkg/agent/types" "antrea.io/antrea/pkg/agent/util" "antrea.io/antrea/pkg/agent/wireguard" "antrea.io/antrea/pkg/features" "antrea.io/antrea/pkg/ovs/ovsconfig" "antrea.io/antrea/pkg/ovs/ovsctl" "antrea.io/antrea/pkg/util/env" "antrea.io/antrea/pkg/util/k8s" ) const ( // Default name of the default tunnel interface on the OVS bridge. defaultTunInterfaceName = "antrea-tun0" maxRetryForHostLink = 5 // ipsecPSKEnvKey is environment variable. ipsecPSKEnvKey = "ANTREA_IPSEC_PSK" roundNumKey = "roundNum" // round number key in externalIDs. initialRoundNum = 1 maxRetryForRoundNumSave = 5 ) var ( // getIPNetDeviceFromIP is meant to be overridden for testing. getIPNetDeviceFromIP = util.GetIPNetDeviceFromIP // getIPNetDeviceByV4CIDR is meant to be overridden for testing. getIPNetDeviceByCIDRs = util.GetIPNetDeviceByCIDRs // getTransportIPNetDeviceByName is meant to be overridden for testing. getTransportIPNetDeviceByName = GetTransportIPNetDeviceByName ) // Initializer knows how to setup host networking, OpenVSwitch, and Openflow. type Initializer struct { client clientset.Interface ovsBridgeClient ovsconfig.OVSBridgeClient ofClient openflow.Client routeClient route.Interface wireGuardClient wireguard.Interface ifaceStore interfacestore.InterfaceStore ovsBridge string hostGateway string // name of gateway port on the OVS bridge mtu int serviceCIDR *net.IPNet // K8s Service ClusterIP CIDR serviceCIDRv6 *net.IPNet // K8s Service ClusterIP CIDR in IPv6 networkConfig *config.NetworkConfig nodeConfig *config.NodeConfig wireGuardConfig *config.WireGuardConfig egressConfig *config.EgressConfig enableProxy bool connectUplinkToBridge bool // networkReadyCh should be closed once the Node's network is ready. // The CNI server will wait for it before handling any CNI Add requests. 
proxyAll bool nodePortAddressesIPv4 []net.IP nodePortAddressesIPv6 []net.IP networkReadyCh chan<- struct{} stopCh <-chan struct{} } func NewInitializer( k8sClient clientset.Interface, ovsBridgeClient ovsconfig.OVSBridgeClient, ofClient openflow.Client, routeClient route.Interface, ifaceStore interfacestore.InterfaceStore, ovsBridge string, hostGateway string, mtu int, serviceCIDR *net.IPNet, serviceCIDRv6 *net.IPNet, networkConfig *config.NetworkConfig, wireGuardConfig *config.WireGuardConfig, egressConfig *config.EgressConfig, networkReadyCh chan<- struct{}, stopCh <-chan struct{}, enableProxy bool, proxyAll bool, nodePortAddressesIPv4 []net.IP, nodePortAddressesIPv6 []net.IP, connectUplinkToBridge bool, ) *Initializer { return &Initializer{ ovsBridgeClient: ovsBridgeClient, client: k8sClient, ifaceStore: ifaceStore, ofClient: ofClient, routeClient: routeClient, ovsBridge: ovsBridge, hostGateway: hostGateway, mtu: mtu, serviceCIDR: serviceCIDR, serviceCIDRv6: serviceCIDRv6, networkConfig: networkConfig, wireGuardConfig: wireGuardConfig, egressConfig: egressConfig, networkReadyCh: networkReadyCh, stopCh: stopCh, enableProxy: enableProxy, proxyAll: proxyAll, nodePortAddressesIPv4: nodePortAddressesIPv4, nodePortAddressesIPv6: nodePortAddressesIPv6, connectUplinkToBridge: connectUplinkToBridge, } } // GetNodeConfig returns the NodeConfig. func (i *Initializer) GetNodeConfig() *config.NodeConfig { return i.nodeConfig } // GetNodeConfig returns the NodeConfig. func (i *Initializer) GetWireGuardClient() wireguard.Interface { return i.wireGuardClient } // setupOVSBridge sets up the OVS bridge and create host gateway interface and tunnel port func (i *Initializer) setupOVSBridge() error { if err := i.ovsBridgeClient.Create(); err != nil { klog.Error("Failed to create OVS bridge: ", err) return err } if err := i.validateSupportedDPFeatures(); err != nil { return err } if err := i.prepareOVSBridge(); err != nil { return err } // Initialize interface cache if err := i.initInterfaceStore(); err != nil { return err } if err := i.setupDefaultTunnelInterface(); err != nil { return err } // Set up host gateway interface err := i.setupGatewayInterface() if err != nil { return err } return nil } func (i *Initializer) validateSupportedDPFeatures() error { gotFeatures, err := ovsctl.NewClient(i.ovsBridge).GetDPFeatures() if err != nil { return err } // Basic requirements. requiredFeatures := []ovsctl.DPFeature{ ovsctl.CTStateFeature, ovsctl.CTZoneFeature, ovsctl.CTMarkFeature, ovsctl.CTLabelFeature, } // AntreaProxy requires CTStateNAT feature. if features.DefaultFeatureGate.Enabled(features.AntreaProxy) { requiredFeatures = append(requiredFeatures, ovsctl.CTStateNATFeature) } for _, feature := range requiredFeatures { supported, found := gotFeatures[feature] if !found { return fmt.Errorf("the required OVS DP feature '%s' support is unknown", feature) } if !supported { return fmt.Errorf("the required OVS DP feature '%s' is not supported", feature) } } return nil } // initInterfaceStore initializes InterfaceStore with all OVS ports retrieved // from the OVS bridge. 
func (i *Initializer) initInterfaceStore() error { ovsPorts, err := i.ovsBridgeClient.GetPortList() if err != nil { klog.Errorf("Failed to list OVS ports: %v", err) return err } ifaceList := make([]*interfacestore.InterfaceConfig, 0, len(ovsPorts)) uplinkIfName := i.nodeConfig.UplinkNetConfig.Name for index := range ovsPorts { port := &ovsPorts[index] ovsPort := &interfacestore.OVSPortConfig{ PortUUID: port.UUID, OFPort: port.OFPort} var intf *interfacestore.InterfaceConfig switch { case port.OFPort == config.HostGatewayOFPort: intf = &interfacestore.InterfaceConfig{ Type: interfacestore.GatewayInterface, InterfaceName: port.Name, OVSPortConfig: ovsPort} if intf.InterfaceName != i.hostGateway { klog.Warningf("The discovered gateway interface name %s is different from the configured value: %s", intf.InterfaceName, i.hostGateway) // Set the gateway interface name to the discovered name. i.hostGateway = intf.InterfaceName } case port.Name == uplinkIfName: intf = &interfacestore.InterfaceConfig{ Type: interfacestore.UplinkInterface, InterfaceName: port.Name, OVSPortConfig: ovsPort, } case port.IFType == ovsconfig.GeneveTunnel: fallthrough case port.IFType == ovsconfig.VXLANTunnel: fallthrough case port.IFType == ovsconfig.GRETunnel: fallthrough case port.IFType == ovsconfig.STTTunnel: intf = noderoute.ParseTunnelInterfaceConfig(port, ovsPort) if intf != nil && port.OFPort == config.DefaultTunOFPort && intf.InterfaceName != i.nodeConfig.DefaultTunName { klog.Infof("The discovered default tunnel interface name %s is different from the default value: %s", intf.InterfaceName, i.nodeConfig.DefaultTunName) // Set the default tunnel interface name to the discovered name. i.nodeConfig.DefaultTunName = intf.InterfaceName } default: // The port should be for a container interface. intf = cniserver.ParseOVSPortInterfaceConfig(port, ovsPort, true) } if intf != nil { ifaceList = append(ifaceList, intf) } } i.ifaceStore.Initialize(ifaceList) return nil } // Initialize sets up agent initial configurations. func (i *Initializer) Initialize() error { klog.Info("Setting up node network") // wg is used to wait for the asynchronous initialization. var wg sync.WaitGroup if err := i.initNodeLocalConfig(); err != nil { return err } if err := i.prepareHostNetwork(); err != nil { return err } if err := i.setupOVSBridge(); err != nil { return err } // initializeWireGuard must be executed after setupOVSBridge as it requires gateway addresses on the OVS bridge. switch i.networkConfig.TrafficEncryptionMode { case config.TrafficEncryptionModeIPSec: if err := i.initializeIPSec(); err != nil { return err } case config.TrafficEncryptionModeWireGuard: if err := i.initializeWireGuard(); err != nil { return err } } wg.Add(1) // routeClient.Initialize() should be after i.setupOVSBridge() which // creates the host gateway interface. if err := i.routeClient.Initialize(i.nodeConfig, wg.Done); err != nil { return err } // Install OpenFlow entries on OVS bridge. if err := i.initOpenFlowPipeline(); err != nil { return err } // The Node's network is ready only when both synchronous and asynchronous initialization are done. go func() { wg.Wait() close(i.networkReadyCh) }() klog.Infof("Agent initialized NodeConfig=%v, NetworkConfig=%v", i.nodeConfig, i.networkConfig) return nil } // persistRoundNum will save the provided round number to OVSDB as an external ID. To account for // transient failures, this (synchronous) function includes a retry mechanism. 
func persistRoundNum(num uint64, bridgeClient ovsconfig.OVSBridgeClient, interval time.Duration, maxRetries int) { klog.Infof("Persisting round number %d to OVSDB", num) retry := 0 for { err := saveRoundNum(num, bridgeClient) if err == nil { klog.Infof("Round number %d was persisted to OVSDB", num) return // success } klog.Errorf("Error when writing round number to OVSDB: %v", err) if retry >= maxRetries { break } time.Sleep(interval) } klog.Errorf("Unable to persist round number %d to OVSDB after %d tries", num, maxRetries+1) } // initOpenFlowPipeline sets up necessary Openflow entries, including pipeline, classifiers, conn_track, and gateway flows // Every time the agent is (re)started, we go through the following sequence: // 1. agent determines the new round number (this is done by incrementing the round number // persisted in OVSDB, or if it's not available by picking round 1). // 2. any existing flow for which the round number matches the round number obtained from step 1 // is deleted. // 3. all required flows are installed, using the round number obtained from step 1. // 4. after convergence, all existing flows for which the round number matches the previous round // number (i.e. the round number which was persisted in OVSDB, if any) are deleted. // 5. the new round number obtained from step 1 is persisted to OVSDB. // The rationale for not persisting the new round number until after all previous flows have been // deleted is to avoid a situation in which some stale flows are never deleted because of successive // agent restarts (with the agent crashing before step 4 can be completed). With the sequence // described above, We guarantee that at most two rounds of flows exist in the switch at any given // time. // Note that at the moment we assume that all OpenFlow groups are deleted every time there is an // Antrea Agent restart. This allows us to add the necessary groups without having to worry about // the operation failing because a (stale) group with the same ID already exists in OVS. This // assumption is currently guaranteed by the ofnet implementation: // https://github.com/wenyingd/ofnet/blob/14a78b27ef8762e45a0cfc858c4d07a4572a99d5/ofctrl/fgraphSwitch.go#L57-L62 // All previous groups have been deleted by the time the call to i.ofClient.Initialize returns. func (i *Initializer) initOpenFlowPipeline() error { roundInfo := getRoundInfo(i.ovsBridgeClient) // Set up all basic flows. ofConnCh, err := i.ofClient.Initialize(roundInfo, i.nodeConfig, i.networkConfig) if err != nil { klog.Errorf("Failed to initialize openflow client: %v", err) return err } // On Windows platform, host network flows are needed for host traffic. if err := i.initHostNetworkFlows(); err != nil { klog.Errorf("Failed to install openflow entries for host network: %v", err) return err } // Install OpenFlow entries to enable Pod traffic to external IP // addresses. if err := i.ofClient.InstallExternalFlows(i.egressConfig.ExceptCIDRs); err != nil { klog.Errorf("Failed to install openflow entries for external connectivity: %v", err) return err } // Set up flow entries for gateway interface, including classifier, skip spoof guard check, // L3 forwarding and L2 forwarding if err := i.ofClient.InstallGatewayFlows(); err != nil { klog.Errorf("Failed to setup openflow entries for gateway: %v", err) return err } if i.networkConfig.TrafficEncapMode.SupportsEncap() { // Set up flow entries for the default tunnel port interface. 
if err := i.ofClient.InstallDefaultTunnelFlows(); err != nil { klog.Errorf("Failed to setup openflow entries for tunnel interface: %v", err) return err } } if !i.enableProxy { // Set up flow entries to enable Service connectivity. Upstream kube-proxy is leveraged to // provide load-balancing, and the flows installed by this method ensure that traffic sent // from local Pods to any Service address can be forwarded to the host gateway interface // correctly. Otherwise packets might be dropped by egress rules before they are DNATed to // backend Pods. if err := i.ofClient.InstallClusterServiceCIDRFlows([]*net.IPNet{i.serviceCIDR, i.serviceCIDRv6}); err != nil { klog.Errorf("Failed to setup OpenFlow entries for Service CIDRs: %v", err) return err } } else { // Set up flow entries to enable Service connectivity. The agent proxy handles // ClusterIP Services while the upstream kube-proxy is leveraged to handle // any other kinds of Services. if err := i.ofClient.InstallDefaultServiceFlows(i.nodePortAddressesIPv4, i.nodePortAddressesIPv6); err != nil { klog.Errorf("Failed to setup default OpenFlow entries for ClusterIP Services: %v", err) return err } } go func() { // Delete stale flows from previous round. We need to wait long enough to ensure // that all the flow which are still required have received an updated cookie (with // the new round number), otherwise we would disrupt the dataplane. Unfortunately, // the time required for convergence may be large and there is no simple way to // determine when is a right time to perform the cleanup task. // TODO: introduce a deterministic mechanism through which the different entities // responsible for installing flows can notify the agent that this deletion // operation can take place. A waitGroup can be created here and notified when // full sync in agent networkpolicy controller is complete. This would signal NP // flows have been synced once. Other mechanisms are still needed for node flows // fullSync check. time.Sleep(10 * time.Second) klog.Info("Deleting stale flows from previous round if any") if err := i.ofClient.DeleteStaleFlows(); err != nil { klog.Errorf("Error when deleting stale flows from previous round: %v", err) return } persistRoundNum(roundInfo.RoundNum, i.ovsBridgeClient, 1*time.Second, maxRetryForRoundNumSave) }() go func() { for { if _, ok := <-ofConnCh; !ok { return } klog.Info("Replaying OF flows to OVS bridge") i.ofClient.ReplayFlows() klog.Info("Flow replay completed") if i.ovsBridgeClient.GetOVSDatapathType() == ovsconfig.OVSDatapathNetdev { // we don't set flow-restore-wait when using the OVS netdev datapath return } // ofClient and ovsBridgeClient have their own mechanisms to restore connections with OVS, and it could // happen that ovsBridgeClient's connection is not ready when ofClient completes flow replay. We retry it // with a timeout that is longer time than ovsBridgeClient's maximum connecting retry interval (8 seconds) // to ensure the flag can be removed successfully. err := wait.PollImmediate(200*time.Millisecond, 10*time.Second, func() (done bool, err error) { if err := i.FlowRestoreComplete(); err != nil { return false, nil } return true, nil }) // This shouldn't happen unless OVS is disconnected again after replaying flows. If it happens, we will try // to clean up the config again so an error log should be fine. 
if err != nil { klog.Errorf("Failed to clean up flow-restore-wait config: %v", err) } } }() return nil } func (i *Initializer) FlowRestoreComplete() error { // Issue #1600: A rare case has been found that the "flow-restore-wait" config was still true even though the delete // call below was considered success. At the moment we don't know if it's a race condition caused by "ovs-vsctl set // --no-wait" or a problem with OVSDB golang lib or OVSDB itself. To work around it, we check if the config is true // before deleting it and if it is false after deleting it, and we will log warnings and retry a few times if // anything unexpected happens. // If the issue can still happen, it must be that some other code sets the config back after it's deleted. getFlowRestoreWait := func() (bool, error) { otherConfig, err := i.ovsBridgeClient.GetOVSOtherConfig() if err != nil { return false, fmt.Errorf("error when getting OVS other config") } return otherConfig["flow-restore-wait"] == "true", nil } // "flow-restore-wait" is supposed to be true here. err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (done bool, err error) { flowRestoreWait, err := getFlowRestoreWait() if err != nil { return false, err } if !flowRestoreWait { // If the log is seen and the config becomes true later, we should look at why "ovs-vsctl set --no-wait" // doesn't take effect on ovsdb immediately. klog.Warning("flow-restore-wait was not true before the delete call was made, will retry") return false, nil } return true, nil }) if err != nil { if err == wait.ErrWaitTimeout { // This could happen if the method is triggered by OVS disconnection event, in which OVS doesn't restart. klog.Info("flow-restore-wait was not true, skip cleaning it up") return nil } return err } for retries := 0; retries < 3; retries++ { // ovs-vswitchd is started with flow-restore-wait set to true for the following reasons: // 1. It prevents packets from being mishandled by ovs-vswitchd in its default fashion, // which could affect existing connections' conntrack state and cause issues like #625. // 2. It prevents ovs-vswitchd from flushing or expiring previously set datapath flows, // so existing connections can achieve 0 downtime during OVS restart. // As a result, we remove the config here after restoring necessary flows. klog.Info("Cleaning up flow-restore-wait config") if err := i.ovsBridgeClient.DeleteOVSOtherConfig(map[string]interface{}{"flow-restore-wait": "true"}); err != nil { return fmt.Errorf("error when cleaning up flow-restore-wait config: %v", err) } flowRestoreWait, err := getFlowRestoreWait() if err != nil { return err } if flowRestoreWait { // If it is seen, we should look at OVSDB golang lib and OVS. klog.Warningf("flow-restore-wait was still true even though the delete call was considered success") continue } klog.Info("Cleaned up flow-restore-wait config") return nil } return fmt.Errorf("error when cleaning up flow-restore-wait config: delete calls failed to take effect") } // setupGatewayInterface creates the host gateway interface which is an internal port on OVS. 
The ofport for host // gateway interface is predefined, so invoke CreateInternalPort with a specific ofport_request func (i *Initializer) setupGatewayInterface() error { // Create host Gateway port if it does not exist gatewayIface, portExists := i.ifaceStore.GetInterface(i.hostGateway) if !portExists { klog.V(2).Infof("Creating gateway port %s on OVS bridge", i.hostGateway) gwPortUUID, err := i.ovsBridgeClient.CreateInternalPort(i.hostGateway, config.HostGatewayOFPort, nil) if err != nil { klog.Errorf("Failed to create gateway port %s on OVS bridge: %v", i.hostGateway, err) return err } gatewayIface = interfacestore.NewGatewayInterface(i.hostGateway) gatewayIface.OVSPortConfig = &interfacestore.OVSPortConfig{PortUUID: gwPortUUID, OFPort: config.HostGatewayOFPort} i.ifaceStore.AddInterface(gatewayIface) } else { klog.V(2).Infof("Gateway port %s already exists on OVS bridge", i.hostGateway) } // Idempotent operation to set the gateway's MTU: we perform this operation regardless of // whether or not the gateway interface already exists, as the desired MTU may change across // restarts. klog.V(4).Infof("Setting gateway interface %s MTU to %d", i.hostGateway, i.nodeConfig.NodeMTU) i.ovsBridgeClient.SetInterfaceMTU(i.hostGateway, i.nodeConfig.NodeMTU) if err := i.configureGatewayInterface(gatewayIface); err != nil { return err } return nil } func (i *Initializer) configureGatewayInterface(gatewayIface *interfacestore.InterfaceConfig) error { var gwMAC net.HardwareAddr var gwLinkIdx int var err error // Host link might not be queried at once after creating OVS internal port; retry max 5 times with 1s // delay each time to ensure the link is ready. for retry := 0; retry < maxRetryForHostLink; retry++ { gwMAC, gwLinkIdx, err = util.SetLinkUp(i.hostGateway) if err == nil { break } if _, ok := err.(util.LinkNotFound); ok { klog.V(2).Infof("Not found host link for gateway %s, retry after 1s", i.hostGateway) time.Sleep(1 * time.Second) continue } return err } if err != nil { klog.Errorf("Failed to find host link for gateway %s: %v", i.hostGateway, err) return err } i.nodeConfig.GatewayConfig = &config.GatewayConfig{Name: i.hostGateway, MAC: gwMAC} gatewayIface.MAC = gwMAC gatewayIface.IPs = []net.IP{} if i.networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() { // Assign IP to gw as required by SpoofGuard. if i.nodeConfig.NodeIPv4Addr != nil { i.nodeConfig.GatewayConfig.IPv4 = i.nodeConfig.NodeTransportIPv4Addr.IP gatewayIface.IPs = append(gatewayIface.IPs, i.nodeConfig.NodeTransportIPv4Addr.IP) } if i.nodeConfig.NodeIPv6Addr != nil { i.nodeConfig.GatewayConfig.IPv6 = i.nodeConfig.NodeTransportIPv6Addr.IP gatewayIface.IPs = append(gatewayIface.IPs, i.nodeConfig.NodeTransportIPv6Addr.IP) } // No need to assign local CIDR to gw0 because local CIDR is not managed by Antrea return nil } i.nodeConfig.GatewayConfig.LinkIndex = gwLinkIdx // Allocate the gateway IP address for each Pod CIDR allocated to the Node. For each CIDR, // the first address in the subnet is assigned to the host gateway interface. 
podCIDRs := []*net.IPNet{i.nodeConfig.PodIPv4CIDR, i.nodeConfig.PodIPv6CIDR} if err := i.allocateGatewayAddresses(podCIDRs, gatewayIface); err != nil { return err } return nil } func (i *Initializer) setupDefaultTunnelInterface() error { tunnelPortName := i.nodeConfig.DefaultTunName tunnelIface, portExists := i.ifaceStore.GetInterface(tunnelPortName) localIP := i.getTunnelPortLocalIP() localIPStr := "" if localIP != nil { localIPStr = localIP.String() } // Enabling UDP checksum can greatly improve the performance for Geneve and // VXLAN tunnels by triggering GRO on the receiver. shouldEnableCsum := i.networkConfig.TunnelType == ovsconfig.GeneveTunnel || i.networkConfig.TunnelType == ovsconfig.VXLANTunnel // Check the default tunnel port. if portExists { if i.networkConfig.TrafficEncapMode.SupportsEncap() && tunnelIface.TunnelInterfaceConfig.Type == i.networkConfig.TunnelType && tunnelIface.TunnelInterfaceConfig.LocalIP.Equal(localIP) { klog.V(2).Infof("Tunnel port %s already exists on OVS bridge", tunnelPortName) // This could happen when upgrading from previous versions that didn't set it. if shouldEnableCsum && !tunnelIface.TunnelInterfaceConfig.Csum { if err := i.enableTunnelCsum(tunnelPortName); err != nil { return fmt.Errorf("failed to enable csum for tunnel port %s: %v", tunnelPortName, err) } tunnelIface.TunnelInterfaceConfig.Csum = true } return nil } if err := i.ovsBridgeClient.DeletePort(tunnelIface.PortUUID); err != nil { if i.networkConfig.TrafficEncapMode.SupportsEncap() { return fmt.Errorf("failed to remove tunnel port %s with wrong tunnel type: %s", tunnelPortName, err) } klog.Errorf("Failed to remove tunnel port %s in NoEncapMode: %v", tunnelPortName, err) } else { klog.Infof("Removed tunnel port %s with tunnel type: %s", tunnelPortName, tunnelIface.TunnelInterfaceConfig.Type) i.ifaceStore.DeleteInterface(tunnelIface) } } // Create the default tunnel port and interface. if i.networkConfig.TrafficEncapMode.SupportsEncap() { if tunnelPortName != defaultTunInterfaceName { // Reset the tunnel interface name to the desired name before // recreating the tunnel port and interface. tunnelPortName = defaultTunInterfaceName i.nodeConfig.DefaultTunName = tunnelPortName } tunnelPortUUID, err := i.ovsBridgeClient.CreateTunnelPortExt(tunnelPortName, i.networkConfig.TunnelType, config.DefaultTunOFPort, shouldEnableCsum, localIPStr, "", "", nil) if err != nil { klog.Errorf("Failed to create tunnel port %s type %s on OVS bridge: %v", tunnelPortName, i.networkConfig.TunnelType, err) return err } tunnelIface = interfacestore.NewTunnelInterface(tunnelPortName, i.networkConfig.TunnelType, localIP, shouldEnableCsum) tunnelIface.OVSPortConfig = &interfacestore.OVSPortConfig{PortUUID: tunnelPortUUID, OFPort: config.DefaultTunOFPort} i.ifaceStore.AddInterface(tunnelIface) } return nil } func (i *Initializer) enableTunnelCsum(tunnelPortName string) error { options, err := i.ovsBridgeClient.GetInterfaceOptions(tunnelPortName) if err != nil { return fmt.Errorf("error getting interface options: %w", err) } updatedOptions := make(map[string]interface{}) for k, v := range options { updatedOptions[k] = v } updatedOptions["csum"] = "true" return i.ovsBridgeClient.SetInterfaceOptions(tunnelPortName, updatedOptions) } // initNodeLocalConfig retrieves node's subnet CIDR from node.spec.PodCIDR, which is used for IPAM and setup // host gateway interface. 
func (i *Initializer) initNodeLocalConfig() error { nodeName, err := env.GetNodeName() if err != nil { return err } node, err := i.client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed to get node from K8s with name %s: %v", nodeName, err) return err } var nodeIPv4Addr, nodeIPv6Addr, transportIPv4Addr, transportIPv6Addr *net.IPNet var localIntf *net.Interface // Find the interface configured with Node IP and use it for Pod traffic. ipAddrs, err := k8s.GetNodeAddrs(node) if err != nil { return fmt.Errorf("failed to obtain local IP addresses from K8s: %w", err) } nodeIPv4Addr, nodeIPv6Addr, localIntf, err = getIPNetDeviceFromIP(ipAddrs) if err != nil { return fmt.Errorf("failed to get local IPNet device with IP %v: %v", ipAddrs, err) } transportIPv4Addr = nodeIPv4Addr transportIPv6Addr = nodeIPv6Addr if i.networkConfig.TransportIface != "" { // Find the configured transport interface, and update its IP address in Node's annotation. transportIPv4Addr, transportIPv6Addr, localIntf, err = getTransportIPNetDeviceByName(i.networkConfig.TransportIface, i.ovsBridge) if err != nil { return fmt.Errorf("failed to get local IPNet device with transport interface %s: %v", i.networkConfig.TransportIface, err) } klog.InfoS("Updating Node transport addresses annotation") var ips []string if transportIPv4Addr != nil { ips = append(ips, transportIPv4Addr.IP.String()) } if transportIPv6Addr != nil { ips = append(ips, transportIPv6Addr.IP.String()) } if err := i.patchNodeAnnotations(nodeName, types.NodeTransportAddressAnnotationKey, strings.Join(ips, ",")); err != nil { return err } } else if len(i.networkConfig.TransportIfaceCIDRs) > 0 { transportIPv4Addr, transportIPv6Addr, localIntf, err = getIPNetDeviceByCIDRs(i.networkConfig.TransportIfaceCIDRs) if err != nil { return fmt.Errorf("failed to get local IPNet device with transport Address CIDR %s: %v", i.networkConfig.TransportIfaceCIDRs, err) } var ips []string if transportIPv4Addr != nil { ips = append(ips, transportIPv4Addr.IP.String()) } if transportIPv6Addr != nil { ips = append(ips, transportIPv6Addr.IP.String()) } klog.InfoS("Updating Node transport addresses annotation") if err := i.patchNodeAnnotations(nodeName, types.NodeTransportAddressAnnotationKey, strings.Join(ips, ",")); err != nil { return err } } else { // Remove the existing annotation "transport-address" if transportInterface is not set in the configuration. if node.Annotations[types.NodeTransportAddressAnnotationKey] != "" { klog.InfoS("Removing Node transport address annotation") i.patchNodeAnnotations(nodeName, types.NodeTransportAddressAnnotationKey, nil) } } // Update the Node's MAC address in the annotations of the Node. The MAC address will be used for direct routing by // OVS in noencap case on Windows Nodes. As a mixture of Linux and Windows nodes is possible, Linux Nodes' MAC // addresses should be reported too to make them discoverable for Windows Nodes. 
if i.networkConfig.TrafficEncapMode.SupportsNoEncap() { klog.Infof("Updating Node MAC annotation") if err := i.patchNodeAnnotations(nodeName, types.NodeMACAddressAnnotationKey, localIntf.HardwareAddr.String()); err != nil { return err } } i.nodeConfig = &config.NodeConfig{ Name: nodeName, OVSBridge: i.ovsBridge, DefaultTunName: defaultTunInterfaceName, NodeIPv4Addr: nodeIPv4Addr, NodeIPv6Addr: nodeIPv6Addr, NodeTransportIPv4Addr: transportIPv4Addr, NodeTransportIPv6Addr: transportIPv6Addr, UplinkNetConfig: new(config.AdapterNetConfig), NodeLocalInterfaceMTU: localIntf.MTU, WireGuardConfig: i.wireGuardConfig, } mtu, err := i.getNodeMTU(localIntf) if err != nil { return err } i.nodeConfig.NodeMTU = mtu klog.Infof("Setting Node MTU=%d", mtu) if i.networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() { return nil } // Parse all PodCIDRs first, so that we could support IPv4/IPv6 dual-stack configurations. if node.Spec.PodCIDRs != nil { for _, podCIDR := range node.Spec.PodCIDRs { _, localSubnet, err := net.ParseCIDR(podCIDR) if err != nil { klog.Errorf("Failed to parse subnet from CIDR string %s: %v", node.Spec.PodCIDR, err) return err } if localSubnet.IP.To4() != nil { if i.nodeConfig.PodIPv4CIDR != nil { klog.Warningf("One IPv4 PodCIDR is already configured on this Node, ignore the IPv4 Subnet CIDR %s", localSubnet.String()) } else { i.nodeConfig.PodIPv4CIDR = localSubnet klog.V(2).Infof("Configure IPv4 Subnet CIDR %s on this Node", localSubnet.String()) } continue } if i.nodeConfig.PodIPv6CIDR != nil { klog.Warningf("One IPv6 PodCIDR is already configured on this Node, ignore the IPv6 subnet CIDR %s", localSubnet.String()) } else { i.nodeConfig.PodIPv6CIDR = localSubnet klog.V(2).Infof("Configure IPv6 Subnet CIDR %s on this Node", localSubnet.String()) } } return nil } // Spec.PodCIDR can be empty due to misconfiguration. if node.Spec.PodCIDR == "" { klog.Errorf("Spec.PodCIDR is empty for Node %s. Please make sure --allocate-node-cidrs is enabled "+ "for kube-controller-manager and --cluster-cidr specifies a sufficient CIDR range", nodeName) return fmt.Errorf("CIDR string is empty for node %s", nodeName) } _, localSubnet, err := net.ParseCIDR(node.Spec.PodCIDR) if err != nil { klog.Errorf("Failed to parse subnet from CIDR string %s: %v", node.Spec.PodCIDR, err) return err } if localSubnet.IP.To4() != nil { i.nodeConfig.PodIPv4CIDR = localSubnet } else { i.nodeConfig.PodIPv6CIDR = localSubnet } return nil } // initializeIPSec checks if preconditions are met for using IPsec and reads the IPsec PSK value. func (i *Initializer) initializeIPSec() error { // At the time the agent is initialized and this code is executed, the // OVS daemons are already running given that we have successfully // connected to OVSDB. Given that the start_ovs script deletes existing // PID files before starting the OVS daemons, it is safe to assume that // if this file exists, the IPsec monitor is indeed running. 
const ovsMonitorIPSecPID = "/var/run/openvswitch/ovs-monitor-ipsec.pid" timer := time.NewTimer(10 * time.Second) defer timer.Stop() ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() for { if _, err := os.Stat(ovsMonitorIPSecPID); err == nil { klog.V(2).Infof("OVS IPsec monitor seems to be present") break } select { case <-ticker.C: continue case <-timer.C: return fmt.Errorf("IPsec was requested, but the OVS IPsec monitor does not seem to be running") } } if err := i.readIPSecPSK(); err != nil { return err } return nil } // initializeWireguard checks if preconditions are met for using WireGuard and initializes WireGuard client or cleans up. func (i *Initializer) initializeWireGuard() error { i.wireGuardConfig.MTU = i.nodeConfig.NodeLocalInterfaceMTU - config.WireGuardOverhead wgClient, err := wireguard.New(i.client, i.nodeConfig, i.wireGuardConfig) if err != nil { return err } i.wireGuardClient = wgClient return i.wireGuardClient.Init() } // readIPSecPSK reads the IPsec PSK value from environment variable ANTREA_IPSEC_PSK func (i *Initializer) readIPSecPSK() error { i.networkConfig.IPSecPSK = os.Getenv(ipsecPSKEnvKey) if i.networkConfig.IPSecPSK == "" { return fmt.Errorf("IPsec PSK environment variable '%s' is not set or is empty", ipsecPSKEnvKey) } // Usually one does not want to log the secret data. klog.V(4).Infof("IPsec PSK value: %s", i.networkConfig.IPSecPSK) return nil } func getLastRoundNum(bridgeClient ovsconfig.OVSBridgeClient) (uint64, error) { extIDs, ovsCfgErr := bridgeClient.GetExternalIDs() if ovsCfgErr != nil { return 0, fmt.Errorf("error getting external IDs: %w", ovsCfgErr) } roundNumValue, exists := extIDs[roundNumKey] if !exists { return 0, fmt.Errorf("no round number found in OVSDB") } num, err := strconv.ParseUint(roundNumValue, 10, 64) if err != nil { return 0, fmt.Errorf("error parsing last round number %v: %w", num, err) } return num, nil } func saveRoundNum(num uint64, bridgeClient ovsconfig.OVSBridgeClient) error { extIDs, ovsCfgErr := bridgeClient.GetExternalIDs() if ovsCfgErr != nil { return fmt.Errorf("error getting external IDs: %w", ovsCfgErr) } updatedExtIDs := make(map[string]interface{}) for k, v := range extIDs { updatedExtIDs[k] = v } updatedExtIDs[roundNumKey] = fmt.Sprint(num) return bridgeClient.SetExternalIDs(updatedExtIDs) } func getRoundInfo(bridgeClient ovsconfig.OVSBridgeClient) types.RoundInfo { roundInfo := types.RoundInfo{} num, err := getLastRoundNum(bridgeClient) if err != nil { klog.Infof("No round number found in OVSDB, using %v", initialRoundNum) // We use a fixed value instead of a randomly-generated value to ensure that stale // flows can be properly deleted in case of multiple rapid restarts when the agent // is first deployed to a Node. num = initialRoundNum } else { roundInfo.PrevRoundNum = new(uint64) *roundInfo.PrevRoundNum = num num++ } num %= 1 << cookie.BitwidthRound klog.Infof("Using round number %d", num) roundInfo.RoundNum = num return roundInfo } func (i *Initializer) getNodeMTU(localIntf *net.Interface) (int, error) { if i.mtu != 0 { return i.mtu, nil } mtu := localIntf.MTU // Make sure mtu is set on the interface. 
if mtu <= 0 { return 0, fmt.Errorf("Failed to fetch Node MTU : %v", mtu) } if i.networkConfig.TrafficEncapMode.SupportsEncap() { if i.networkConfig.TunnelType == ovsconfig.VXLANTunnel { mtu -= config.VXLANOverhead } else if i.networkConfig.TunnelType == ovsconfig.GeneveTunnel { mtu -= config.GeneveOverhead } else if i.networkConfig.TunnelType == ovsconfig.GRETunnel { mtu -= config.GREOverhead } if i.nodeConfig.NodeIPv6Addr != nil { mtu -= config.IPv6ExtraOverhead } } if i.networkConfig.TrafficEncryptionMode == config.TrafficEncryptionModeIPSec { mtu -= config.IPSecESPOverhead } return mtu, nil } func (i *Initializer) allocateGatewayAddresses(localSubnets []*net.IPNet, gatewayIface *interfacestore.InterfaceConfig) error { var gwIPs []*net.IPNet for _, localSubnet := range localSubnets { if localSubnet == nil { continue } subnetID := localSubnet.IP.Mask(localSubnet.Mask) gwIP := &net.IPNet{IP: ip.NextIP(subnetID), Mask: localSubnet.Mask} gwIPs = append(gwIPs, gwIP) } if len(gwIPs) == 0 { return nil } // Check IP address configuration on existing interface first, return if the interface has the desired addresses. // We perform this check unconditionally, even if the OVS port does not exist when this function is called // (i.e. portExists is false). Indeed, it may be possible for the interface to exist even if the OVS bridge does // not exist. // Configure any missing IP address on the interface. Remove any extra IP address that may exist. if err := util.ConfigureLinkAddresses(i.nodeConfig.GatewayConfig.LinkIndex, gwIPs); err != nil { return err } // Periodically check whether IP configuration of the gateway is correct. // Terminate when stopCh is closed. go wait.Until(func() { if err := util.ConfigureLinkAddresses(i.nodeConfig.GatewayConfig.LinkIndex, gwIPs); err != nil { klog.Errorf("Failed to check IP configuration of the gateway: %v", err) } }, 60*time.Second, i.stopCh) for _, gwIP := range gwIPs { if gwIP.IP.To4() != nil { i.nodeConfig.GatewayConfig.IPv4 = gwIP.IP } else { i.nodeConfig.GatewayConfig.IPv6 = gwIP.IP } gatewayIface.IPs = append(gatewayIface.IPs, gwIP.IP) } return nil } func (i *Initializer) patchNodeAnnotations(nodeName, key string, value interface{}) error { patch, _ := json.Marshal(map[string]interface{}{ "metadata": map[string]interface{}{ "annotations": map[string]interface{}{ key: value, }, }, }) if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { _, err := i.client.CoreV1().Nodes().Patch(context.TODO(), nodeName, apitypes.MergePatchType, patch, metav1.PatchOptions{}) return err }); err != nil { klog.ErrorS(err, "Failed to patch Node annotation", "key", key, "value", value) return err } return nil }
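The MTU derivation in getNodeMTU above is plain subtraction of per-encapsulation header overheads. A minimal standalone sketch of the same logic, with the overhead values as assumptions for illustration (the real constants live in Antrea's config package and may differ):

package main

import "fmt"

// Assumed overhead values, for illustration only; Antrea defines the
// authoritative constants in its config package.
const (
	vxlanOverhead    = 50 // outer Ethernet/IP/UDP/VXLAN headers
	geneveOverhead   = 50
	greOverhead      = 38
	ipv6Extra        = 20 // IPv6 header is 20 bytes larger than IPv4
	ipsecESPOverhead = 38
)

// podMTU mirrors the subtraction: start from the uplink MTU and remove
// the headers added by the chosen tunnel and encryption mode.
func podMTU(linkMTU int, tunnel string, ipv6, ipsec bool) int {
	mtu := linkMTU
	switch tunnel {
	case "vxlan":
		mtu -= vxlanOverhead
	case "geneve":
		mtu -= geneveOverhead
	case "gre":
		mtu -= greOverhead
	}
	if ipv6 {
		mtu -= ipv6Extra
	}
	if ipsec {
		mtu -= ipsecESPOverhead
	}
	return mtu
}

func main() {
	// e.g. a 1500-byte uplink with Geneve + IPsec
	fmt.Println(podMTU(1500, "geneve", false, true)) // 1412
}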
1
47,857
Was it a bug?
antrea-io-antrea
go
@@ -54,10 +54,8 @@ class ProxyType: value = str(value).upper() for attr in dir(cls): attr_value = getattr(cls, attr) - if isinstance(attr_value, dict) and \ - 'string' in attr_value and \ - attr_value['string'] is not None and \ - attr_value['string'] == value: + # `attr_value['string'] is not None` probably not required as `attr_value['string'] == value` + if isinstance(attr_value, dict) and 'string' in attr_value and attr_value['string'] == value: return attr_value raise Exception(f"No proxy type is found for {value}")
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ The Proxy implementation. """ class ProxyTypeFactory: """ Factory for proxy types. """ @staticmethod def make(ff_value, string): return {'ff_value': ff_value, 'string': string} class ProxyType: """ Set of possible types of proxy. Each proxy type has 2 properties: 'ff_value' is value of Firefox profile preference, 'string' is id of proxy type. """ DIRECT = ProxyTypeFactory.make(0, 'DIRECT') # Direct connection, no proxy (default on Windows). MANUAL = ProxyTypeFactory.make(1, 'MANUAL') # Manual proxy settings (e.g., for httpProxy). PAC = ProxyTypeFactory.make(2, 'PAC') # Proxy autoconfiguration from URL. RESERVED_1 = ProxyTypeFactory.make(3, 'RESERVED1') # Never used. AUTODETECT = ProxyTypeFactory.make(4, 'AUTODETECT') # Proxy autodetection (presumably with WPAD). SYSTEM = ProxyTypeFactory.make(5, 'SYSTEM') # Use system settings (default on Linux). UNSPECIFIED = ProxyTypeFactory.make(6, 'UNSPECIFIED') # Not initialized (for internal use). @classmethod def load(cls, value): if isinstance(value, dict) and 'string' in value: value = value['string'] value = str(value).upper() for attr in dir(cls): attr_value = getattr(cls, attr) if isinstance(attr_value, dict) and \ 'string' in attr_value and \ attr_value['string'] is not None and \ attr_value['string'] == value: return attr_value raise Exception(f"No proxy type is found for {value}") class Proxy(object): """ Proxy contains information about proxy type and necessary proxy settings. """ proxyType = ProxyType.UNSPECIFIED autodetect = False ftpProxy = '' httpProxy = '' noProxy = '' proxyAutoconfigUrl = '' sslProxy = '' socksProxy = '' socksUsername = '' socksPassword = '' socksVersion = None def __init__(self, raw=None): """ Creates a new Proxy. :Args: - raw: raw proxy data. If None, default class values are used. 
""" if raw is not None: if 'proxyType' in raw and raw['proxyType'] is not None: self.proxy_type = ProxyType.load(raw['proxyType']) if 'ftpProxy' in raw and raw['ftpProxy'] is not None: self.ftp_proxy = raw['ftpProxy'] if 'httpProxy' in raw and raw['httpProxy'] is not None: self.http_proxy = raw['httpProxy'] if 'noProxy' in raw and raw['noProxy'] is not None: self.no_proxy = raw['noProxy'] if 'proxyAutoconfigUrl' in raw and raw['proxyAutoconfigUrl'] is not None: self.proxy_autoconfig_url = raw['proxyAutoconfigUrl'] if 'sslProxy' in raw and raw['sslProxy'] is not None: self.sslProxy = raw['sslProxy'] if 'autodetect' in raw and raw['autodetect'] is not None: self.auto_detect = raw['autodetect'] if 'socksProxy' in raw and raw['socksProxy'] is not None: self.socks_proxy = raw['socksProxy'] if 'socksUsername' in raw and raw['socksUsername'] is not None: self.socks_username = raw['socksUsername'] if 'socksPassword' in raw and raw['socksPassword'] is not None: self.socks_password = raw['socksPassword'] if 'socksVersion' in raw and raw['socksVersion'] is not None: self.socks_version = raw['socksVersion'] @property def proxy_type(self): """ Returns proxy type as `ProxyType`. """ return self.proxyType @proxy_type.setter def proxy_type(self, value): """ Sets proxy type. :Args: - value: The proxy type. """ self._verify_proxy_type_compatibility(value) self.proxyType = value @property def auto_detect(self): """ Returns autodetect setting. """ return self.autodetect @auto_detect.setter def auto_detect(self, value): """ Sets autodetect setting. :Args: - value: The autodetect value. """ if isinstance(value, bool): if self.autodetect is not value: self._verify_proxy_type_compatibility(ProxyType.AUTODETECT) self.proxyType = ProxyType.AUTODETECT self.autodetect = value else: raise ValueError("Autodetect proxy value needs to be a boolean") @property def ftp_proxy(self): """ Returns ftp proxy setting. """ return self.ftpProxy @ftp_proxy.setter def ftp_proxy(self, value): """ Sets ftp proxy setting. :Args: - value: The ftp proxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.ftpProxy = value @property def http_proxy(self): """ Returns http proxy setting. """ return self.httpProxy @http_proxy.setter def http_proxy(self, value): """ Sets http proxy setting. :Args: - value: The http proxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.httpProxy = value @property def no_proxy(self): """ Returns noproxy setting. """ return self.noProxy @no_proxy.setter def no_proxy(self, value): """ Sets noproxy setting. :Args: - value: The noproxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.noProxy = value @property def proxy_autoconfig_url(self): """ Returns proxy autoconfig url setting. """ return self.proxyAutoconfigUrl @proxy_autoconfig_url.setter def proxy_autoconfig_url(self, value): """ Sets proxy autoconfig url setting. :Args: - value: The proxy autoconfig url value. """ self._verify_proxy_type_compatibility(ProxyType.PAC) self.proxyType = ProxyType.PAC self.proxyAutoconfigUrl = value @property def ssl_proxy(self): """ Returns https proxy setting. """ return self.sslProxy @ssl_proxy.setter def ssl_proxy(self, value): """ Sets https proxy setting. :Args: - value: The https proxy value. 
""" self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.sslProxy = value @property def socks_proxy(self): """ Returns socks proxy setting. """ return self.socksProxy @socks_proxy.setter def socks_proxy(self, value): """ Sets socks proxy setting. :Args: - value: The socks proxy value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksProxy = value @property def socks_username(self): """ Returns socks proxy username setting. """ return self.socksUsername @socks_username.setter def socks_username(self, value): """ Sets socks proxy username setting. :Args: - value: The socks proxy username value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksUsername = value @property def socks_password(self): """ Returns socks proxy password setting. """ return self.socksPassword @socks_password.setter def socks_password(self, value): """ Sets socks proxy password setting. :Args: - value: The socks proxy password value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksPassword = value @property def socks_version(self): """ Returns socks proxy version setting. """ return self.socksVersion @socks_version.setter def socks_version(self, value): """ Sets socks proxy version setting. :Args: - value: The socks proxy version value. """ self._verify_proxy_type_compatibility(ProxyType.MANUAL) self.proxyType = ProxyType.MANUAL self.socksVersion = value def _verify_proxy_type_compatibility(self, compatibleProxy): if self.proxyType != ProxyType.UNSPECIFIED and self.proxyType != compatibleProxy: raise Exception(f"Specified proxy type ({compatibleProxy}) not compatible with current setting ({self.proxyType})") def add_to_capabilities(self, capabilities): """ Adds proxy information as capability in specified capabilities. :Args: - capabilities: The capabilities to which proxy will be added. """ proxy_caps = {} proxy_caps['proxyType'] = self.proxyType['string'] if self.autodetect: proxy_caps['autodetect'] = self.autodetect if self.ftpProxy: proxy_caps['ftpProxy'] = self.ftpProxy if self.httpProxy: proxy_caps['httpProxy'] = self.httpProxy if self.proxyAutoconfigUrl: proxy_caps['proxyAutoconfigUrl'] = self.proxyAutoconfigUrl if self.sslProxy: proxy_caps['sslProxy'] = self.sslProxy if self.noProxy: proxy_caps['noProxy'] = self.noProxy if self.socksProxy: proxy_caps['socksProxy'] = self.socksProxy if self.socksUsername: proxy_caps['socksUsername'] = self.socksUsername if self.socksPassword: proxy_caps['socksPassword'] = self.socksPassword if self.socksVersion: proxy_caps['socksVersion'] = self.socksVersion capabilities['proxy'] = proxy_caps
1
18,411
# `attr_value['string'] is not None` probably not required as `attr_value['string'] == value` check is already being done
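A runnable sketch of the simplified lookup the diff proposes; the `is not None` guard is subsumed by the equality check because `value` is always a non-None string at that point (class names mirror the file above, trimmed to two proxy types):

class ProxyTypeFactory:
    @staticmethod
    def make(ff_value, string):
        return {'ff_value': ff_value, 'string': string}


class ProxyType:
    DIRECT = ProxyTypeFactory.make(0, 'DIRECT')
    MANUAL = ProxyTypeFactory.make(1, 'MANUAL')

    @classmethod
    def load(cls, value):
        if isinstance(value, dict) and 'string' in value:
            value = value['string']
        value = str(value).upper()
        for attr in dir(cls):
            attr_value = getattr(cls, attr)
            # `attr_value['string'] is not None` is redundant: value is a
            # non-None string here, so `== value` can only match a string.
            if isinstance(attr_value, dict) and 'string' in attr_value \
                    and attr_value['string'] == value:
                return attr_value
        raise Exception(f"No proxy type is found for {value}")


assert ProxyType.load('manual') is ProxyType.MANUAL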
SeleniumHQ-selenium
py
@@ -66,6 +66,7 @@ def replace_variables(win_id, arglist): 'url:host': lambda: _current_url(tabbed_browser).host(), 'clipboard': utils.get_clipboard, 'primary': lambda: utils.get_clipboard(selection=True), + 'link_hovered': lambda: tabbed_browser._now_focused._last_hovered_link, } for key in list(variables): modified_key = '{' + key + '}'
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Module containing command managers (SearchRunner and CommandRunner).""" import traceback import re import attr from PyQt5.QtCore import pyqtSlot, QUrl, QObject from qutebrowser.config import config from qutebrowser.commands import cmdexc, cmdutils from qutebrowser.utils import message, objreg, qtutils, usertypes, utils from qutebrowser.misc import split last_command = {} @attr.s class ParseResult: """The result of parsing a commandline.""" cmd = attr.ib() args = attr.ib() cmdline = attr.ib() def _current_url(tabbed_browser): """Convenience method to get the current url.""" try: return tabbed_browser.current_url() except qtutils.QtValueError as e: msg = "Current URL is invalid" if e.reason: msg += " ({})".format(e.reason) msg += "!" raise cmdexc.CommandError(msg) def replace_variables(win_id, arglist): """Utility function to replace variables like {url} in a list of args.""" variables = { 'url': lambda: _current_url(tabbed_browser).toString( QUrl.FullyEncoded | QUrl.RemovePassword), 'url:pretty': lambda: _current_url(tabbed_browser).toString( QUrl.DecodeReserved | QUrl.RemovePassword), 'url:host': lambda: _current_url(tabbed_browser).host(), 'clipboard': utils.get_clipboard, 'primary': lambda: utils.get_clipboard(selection=True), } for key in list(variables): modified_key = '{' + key + '}' variables[modified_key] = lambda x=modified_key: x values = {} args = [] tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) def repl_cb(matchobj): """Return replacement for given match.""" var = matchobj.group("var") if var not in values: values[var] = variables[var]() return values[var] repl_pattern = re.compile("{(?P<var>" + "|".join(variables.keys()) + ")}") try: for arg in arglist: # using re.sub with callback function replaces all variables in a # single pass and avoids expansion of nested variables (e.g. # "{url}" from clipboard is not expanded) args.append(repl_pattern.sub(repl_cb, arg)) except utils.ClipboardError as e: raise cmdexc.CommandError(e) return args class CommandParser: """Parse qutebrowser commandline commands. Attributes: _partial_match: Whether to allow partial command matches. """ def __init__(self, partial_match=False): self._partial_match = partial_match def _get_alias(self, text, default=None): """Get an alias from the config. Args: text: The text to parse. default : Default value to return when alias was not found. Return: The new command string if an alias was found. Default value otherwise. 
""" parts = text.strip().split(maxsplit=1) try: alias = config.val.aliases[parts[0]] except KeyError: return default try: new_cmd = '{} {}'.format(alias, parts[1]) except IndexError: new_cmd = alias if text.endswith(' '): new_cmd += ' ' return new_cmd def _parse_all_gen(self, text, *args, aliases=True, **kwargs): """Split a command on ;; and parse all parts. If the first command in the commandline is a non-split one, it only returns that. Args: text: Text to parse. aliases: Whether to handle aliases. *args/**kwargs: Passed to parse(). Yields: ParseResult tuples. """ text = text.strip().lstrip(':').strip() if not text: raise cmdexc.NoSuchCommandError("No command given") if aliases: text = self._get_alias(text, text) if ';;' in text: # Get the first command and check if it doesn't want to have ;; # split. first = text.split(';;')[0] result = self.parse(first, *args, **kwargs) if result.cmd.no_cmd_split: sub_texts = [text] else: sub_texts = [e.strip() for e in text.split(';;')] else: sub_texts = [text] for sub in sub_texts: yield self.parse(sub, *args, **kwargs) def parse_all(self, *args, **kwargs): """Wrapper over _parse_all_gen.""" return list(self._parse_all_gen(*args, **kwargs)) def parse(self, text, *, fallback=False, keep=False): """Split the commandline text into command and arguments. Args: text: Text to parse. fallback: Whether to do a fallback splitting when the command was unknown. keep: Whether to keep special chars and whitespace Return: A ParseResult tuple. """ cmdstr, sep, argstr = text.partition(' ') if not cmdstr and not fallback: raise cmdexc.NoSuchCommandError("No command given") if self._partial_match: cmdstr = self._completion_match(cmdstr) try: cmd = cmdutils.cmd_dict[cmdstr] except KeyError: if not fallback: raise cmdexc.NoSuchCommandError( '{}: no such command'.format(cmdstr)) cmdline = split.split(text, keep=keep) return ParseResult(cmd=None, args=None, cmdline=cmdline) args = self._split_args(cmd, argstr, keep) if keep and args: cmdline = [cmdstr, sep + args[0]] + args[1:] elif keep: cmdline = [cmdstr, sep] else: cmdline = [cmdstr] + args[:] return ParseResult(cmd=cmd, args=args, cmdline=cmdline) def _completion_match(self, cmdstr): """Replace cmdstr with a matching completion if there's only one match. Args: cmdstr: The string representing the entered command so far Return: cmdstr modified to the matching completion or unmodified """ matches = [cmd for cmd in sorted(cmdutils.cmd_dict, key=len) if cmdstr in cmd] if len(matches) == 1: cmdstr = matches[0] elif len(matches) > 1 and config.val.completion.use_best_match: cmdstr = matches[0] return cmdstr def _split_args(self, cmd, argstr, keep): """Split the arguments from an arg string. Args: cmd: The command we're currently handling. argstr: An argument string. keep: Whether to keep special chars and whitespace Return: A list containing the split strings. """ if not argstr: return [] elif cmd.maxsplit is None: return split.split(argstr, keep=keep) else: # If split=False, we still want to split the flags, but not # everything after that. # We first split the arg string and check the index of the first # non-flag args, then we re-split again properly. 
# example: # # input: "--foo -v bar baz" # first split: ['--foo', '-v', 'bar', 'baz'] # 0 1 2 3 # second split: ['--foo', '-v', 'bar baz'] # (maxsplit=2) split_args = split.simple_split(argstr, keep=keep) flag_arg_count = 0 for i, arg in enumerate(split_args): arg = arg.strip() if arg.startswith('-'): if arg in cmd.flags_with_args: flag_arg_count += 1 else: maxsplit = i + cmd.maxsplit + flag_arg_count return split.simple_split(argstr, keep=keep, maxsplit=maxsplit) # If there are only flags, we got it right on the first try # already. return split_args class CommandRunner(QObject): """Parse and run qutebrowser commandline commands. Attributes: _win_id: The window this CommandRunner is associated with. """ def __init__(self, win_id, partial_match=False, parent=None): super().__init__(parent) self._parser = CommandParser(partial_match=partial_match) self._win_id = win_id def run(self, text, count=None): """Parse a command from a line of text and run it. Args: text: The text to parse. count: The count to pass to the command. """ record_last_command = True record_macro = True mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) cur_mode = mode_manager.mode for result in self._parser.parse_all(text): if result.cmd.no_replace_variables: args = result.args else: args = replace_variables(self._win_id, result.args) result.cmd.run(self._win_id, args, count=count) if result.cmdline[0] == 'repeat-command': record_last_command = False if result.cmdline[0] in ['record-macro', 'run-macro', 'set-cmd-text']: record_macro = False if record_last_command: last_command[cur_mode] = (text, count) if record_macro and cur_mode == usertypes.KeyMode.normal: macro_recorder = objreg.get('macro-recorder') macro_recorder.record_command(text, count) @pyqtSlot(str, int) @pyqtSlot(str) def run_safely(self, text, count=None): """Run a command and display exceptions in the statusbar.""" try: self.run(text, count) except cmdexc.Error as e: message.error(str(e), stack=traceback.format_exc())
1
21,680
You're accessing private variables here - `last_hovered_link` should be public in `TabData`. As for `tabbed_browser._now_focused`, I think you can use `tabbed_browser.widget.currentWidget()` instead.
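A sketch of the reviewer's suggestion, purely illustrative: it assumes `last_hovered_link` has been promoted to a public attribute on `TabData` and that the focused tab is reachable via the public `tabbed_browser.widget.currentWidget()`:

def _link_hovered(tabbed_browser):
    """Return the last hovered link of the focused tab.

    Assumes `last_hovered_link` is a public attribute on TabData, per the
    review, instead of reaching into _now_focused._last_hovered_link.
    """
    tab = tabbed_browser.widget.currentWidget()
    return tab.data.last_hovered_link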
qutebrowser-qutebrowser
py
@@ -0,0 +1,10 @@ +class InvitationMailer < BaseMailer + def invitation(invitation_id) + @invitation = Invitation.find(invitation_id) + + mail( + to: @invitation.email, + subject: 'Invitation' + ) + end +end
1
1
10,791
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
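Applied to the diff above, the style-guide version would read (same behavior, double quotes only):

class InvitationMailer < BaseMailer
  def invitation(invitation_id)
    @invitation = Invitation.find(invitation_id)

    mail(
      to: @invitation.email,
      subject: "Invitation"
    )
  end
end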
thoughtbot-upcase
rb
@@ -0,0 +1,15 @@ +namespace Fixtures.Azure.SwaggerBatSubscriptionIdApiVersion +{ + using System; + using System.Collections; + using System.Collections.Generic; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Rest; + using Microsoft.Azure; + using Models; + + public static partial class MicrosoftAzureTestUrlExtensions + { + } +}
1
1
20,728
We should file a bug for this - we don't need the extensions class if there are no operations on the client
Azure-autorest
java
@@ -415,6 +415,8 @@ func (p *csiPlugin) NewMounter( } klog.V(4).Info(log("created path successfully [%s]", dataDir)) + mounter.MetricsProvider = NewMetricsCsi(volumeHandle, dir, csiDriverName(driverName)) + // persist volume info data for teardown node := string(p.host.GetNodeName()) volData := map[string]string{
1
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @CHANGELOG KubeEdge Authors: To create mini-kubelet for edge deployment scenario, this file is derived from kubernetes v1.15.3, and the full file path is k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go and make some modifications including: 1. remove BlockVolumePlugin. 2. empty skipAttach function. 3. add NewController function. */ package csi import ( "context" "errors" "fmt" "os" "path" "sort" "strings" "time" api "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" storagelisters "k8s.io/client-go/listers/storage/v1" csitranslationplugins "k8s.io/csi-translation-lib/plugins" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" "github.com/kubeedge/kubeedge/edge/pkg/edged/volume/csi/nodeinfomanager" ) const ( // CSIPluginName is the name of the in-tree CSI Plugin CSIPluginName = "kubernetes.io/csi" // TODO (vladimirvivien) implement a more dynamic way to discover // the unix domain socket path for each installed csi driver. // TODO (vladimirvivien) would be nice to name socket with a .sock extension // for consistency. csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock" csiTimeout = 2 * time.Minute volNameSep = "^" volDataFileName = "vol_data.json" fsTypeBlockName = "block" // TODO: increase to something useful //csiResyncPeriod = time.Minute ) type csiPlugin struct { host volume.VolumeHost blockEnabled bool csiDriverLister storagelisters.CSIDriverLister } //TODO (vladimirvivien) add this type to storage api type driverMode string const persistentDriverMode driverMode = "persistent" const ephemeralDriverMode driverMode = "ephemeral" // ProbeVolumePlugins returns implemented plugins func ProbeVolumePlugins() []volume.VolumePlugin { p := &csiPlugin{ host: nil, } return []volume.VolumePlugin{p} } // volume.VolumePlugin methods var _ volume.VolumePlugin = &csiPlugin{} // RegistrationHandler is the handler which is fed to the pluginwatcher API. type RegistrationHandler struct { } // TODO (verult) consider using a struct instead of global variables // csiDrivers map keep track of all registered CSI drivers on the node and their // corresponding sockets var csiDrivers = &DriversStore{} var nim nodeinfomanager.Interface // PluginHandler is the plugin registration handler interface passed to the // pluginwatcher module in kubelet var PluginHandler = &RegistrationHandler{} // ValidatePlugin is called by kubelet's plugin watcher upon detection // of a new registration socket opened by CSI Driver registrar side car. 
func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error { klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s", pluginName, endpoint, strings.Join(versions, ","))) _, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions) if err != nil { return fmt.Errorf("validation failed for CSI Driver %s at endpoint %s: %v", pluginName, endpoint, err) } return err } // RegisterPlugin is called when a plugin can be registered func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string) error { klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint)) highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions) if err != nil { return err } // Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key // all other CSI components will be able to get the actual socket of CSI drivers by its name. csiDrivers.Set(pluginName, Driver{ endpoint: endpoint, highestSupportedVersion: highestSupportedVersion, }) // Get node info from the driver. csi, err := newCsiDriverClient(csiDriverName(pluginName)) if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) defer cancel() driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx) if err != nil { if unregErr := unregisterDriver(pluginName); unregErr != nil { klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr)) } return err } err = nim.InstallCSIDriver(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology) if err != nil { if unregErr := unregisterDriver(pluginName); unregErr != nil { klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr)) } return err } return nil } func (h *RegistrationHandler) validateVersions(callerName, pluginName string, endpoint string, versions []string) (*utilversion.Version, error) { if len(versions) == 0 { err := fmt.Errorf("%s for CSI driver %q failed. Plugin returned an empty list for supported versions", callerName, pluginName) klog.Error(err) return nil, err } // Validate version newDriverHighestVersion, err := highestSupportedVersion(versions) if err != nil { err := fmt.Errorf("%s for CSI driver %q failed. None of the versions specified %q are supported. err=%v", callerName, pluginName, versions, err) klog.Error(err) return nil, err } existingDriver, driverExists := csiDrivers.Get(pluginName) if driverExists { if !existingDriver.highestSupportedVersion.LessThan(newDriverHighestVersion) { err := fmt.Errorf("%s for CSI driver %q failed. 
Another driver with the same name is already registered with a higher supported version: %q", callerName, pluginName, existingDriver.highestSupportedVersion) klog.Error(err) return nil, err } } return newDriverHighestVersion, nil } // DeRegisterPlugin is called when a plugin removed its socket, signaling // it is no longer available func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) { klog.V(4).Info(log("registrationHandler.DeRegisterPlugin request for plugin %s", pluginName)) if err := unregisterDriver(pluginName); err != nil { klog.Error(log("registrationHandler.DeRegisterPlugin failed: %v", err)) } } func (p *csiPlugin) Init(host volume.VolumeHost) error { p.host = host csiClient := host.GetKubeClient() if csiClient == nil { klog.Warning(log("kubeclient not set, assuming standalone kubelet")) } else { // set CSIDriverLister adcHost, ok := host.(volume.AttachDetachVolumeHost) if ok { p.csiDriverLister = adcHost.CSIDriverLister() if p.csiDriverLister == nil { klog.Error(log("CSIDriverLister not found on AttachDetachVolumeHost")) } } kletHost, ok := host.(volume.KubeletVolumeHost) if ok { p.csiDriverLister = kletHost.CSIDriverLister() if p.csiDriverLister == nil { klog.Error(log("CSIDriverLister not found on KubeletVolumeHost")) } } } var migratedPlugins = map[string](func() bool){ csitranslationplugins.GCEPDInTreePluginName: func() bool { return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE) }, csitranslationplugins.AWSEBSInTreePluginName: func() bool { return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS) }, csitranslationplugins.CinderInTreePluginName: func() bool { return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) }, } // Initializing the label management channels nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins) // TODO: Evaluate the feature releated to csi /*if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { // This function prevents Kubelet from posting Ready status until CSINodeInfo // is both installed and initialized if err := initializeCSINode(host); err != nil { return fmt.Errorf("failed to initialize CSINodeInfo: %v", err) } }*/ return nil } func initializeCSINode(host volume.VolumeHost) error { kvh, ok := host.(volume.KubeletVolumeHost) if !ok { klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINodeInfo initialization, not running on kubelet") return nil } kubeClient := host.GetKubeClient() if kubeClient == nil { // Kubelet running in standalone mode. Skip CSINodeInfo initialization klog.Warning("Skipping CSINodeInfo initialization, kubelet running in standalone mode") return nil } kvh.SetKubeletError(errors.New("CSINodeInfo is not yet initialized")) go func() { defer utilruntime.HandleCrash() // Backoff parameters tuned to retry over 140 seconds. Will fail and restart the Kubelet // after max retry steps. 
initBackoff := wait.Backoff{ Steps: 6, Duration: 15 * time.Millisecond, Factor: 6.0, Jitter: 0.1, } err := wait.ExponentialBackoff(initBackoff, func() (bool, error) { klog.V(4).Infof("Initializing migrated drivers on CSINodeInfo") err := nim.InitializeCSINodeWithAnnotation() if err != nil { kvh.SetKubeletError(fmt.Errorf("failed to initialize CSINodeInfo: %v", err)) klog.Errorf("Failed to initialize CSINodeInfo: %v", err) return false, nil } // Successfully initialized drivers, allow Kubelet to post Ready kvh.SetKubeletError(nil) return true, nil }) if err != nil { // 2 releases after CSIMigration and all CSIMigrationX (where X is a volume plugin) // are permanently enabled the apiserver/controllers can assume that the kubelet is // using CSI for all Migrated volume plugins. Then all the CSINode initialization // code can be dropped from Kubelet. // Kill the Kubelet process and allow it to restart to retry initialization klog.Fatalf("Failed to initialize CSINodeInfo after retrying") } }() return nil } func (p *csiPlugin) GetPluginName() string { return CSIPluginName } // GetvolumeName returns a concatenated string of CSIVolumeSource.Driver<volNameSe>CSIVolumeSource.VolumeHandle // That string value is used in Detach() to extract driver name and volumeName. func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) { csi, err := getPVSourceFromSpec(spec) if err != nil { klog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err)) return "", err } // return driverName<separator>volumeHandle return fmt.Sprintf("%s%s%s", csi.Driver, volNameSep, csi.VolumeHandle), nil } func (p *csiPlugin) CanSupport(spec *volume.Spec) bool { // TODO (vladimirvivien) CanSupport should also take into account // the availability/registration of specified Driver in the volume source if spec == nil { return false } if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) { return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil) || (spec.Volume != nil && spec.Volume.CSI != nil) } return spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil } func (p *csiPlugin) IsMigratedToCSI() bool { return false } func (p *csiPlugin) RequiresRemount(*volume.Spec) bool { return false } func (p *csiPlugin) NewMounter( spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { volSrc, pvSrc, err := getSourceFromSpec(spec) if err != nil { return nil, err } var ( driverName string volumeHandle string readOnly bool ) switch { case volSrc != nil && utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume): volumeHandle = makeVolumeHandle(string(pod.UID), spec.Name()) driverName = volSrc.Driver if volSrc.ReadOnly != nil { readOnly = *volSrc.ReadOnly } case pvSrc != nil: driverName = pvSrc.Driver volumeHandle = pvSrc.VolumeHandle readOnly = spec.ReadOnly default: return nil, fmt.Errorf("volume source not found in volume.Spec") } driverMode, err := p.getDriverMode(spec) if err != nil { return nil, err } k8s := p.host.GetKubeClient() if k8s == nil { klog.Error(log("failed to get a kubernetes client")) return nil, errors.New("failed to get a Kubernetes client") } kvh, ok := p.host.(volume.KubeletVolumeHost) if !ok { return nil, errors.New(log("cast from VolumeHost to KubeletVolumeHost failed")) } mounter := &csiMountMgr{ plugin: p, k8s: k8s, spec: spec, pod: pod, podUID: pod.UID, driverName: csiDriverName(driverName), driverMode: driverMode, volumeID: volumeHandle, specVolumeID: spec.Name(), readOnly: readOnly, 
kubeVolHost: kvh, } mounter.csiClientGetter.driverName = csiDriverName(driverName) // Save volume info in pod dir dir := mounter.GetPath() dataDir := path.Dir(dir) // dropoff /mount at end if err := os.MkdirAll(dataDir, 0750); err != nil { klog.Error(log("failed to create dir %#v: %v", dataDir, err)) return nil, err } klog.V(4).Info(log("created path successfully [%s]", dataDir)) // persist volume info data for teardown node := string(p.host.GetNodeName()) volData := map[string]string{ volDataKey.specVolID: spec.Name(), volDataKey.volHandle: volumeHandle, volDataKey.driverName: driverName, volDataKey.nodeName: node, volDataKey.driverMode: string(driverMode), } attachID := getAttachmentName(volumeHandle, driverName, node) volData[volDataKey.attachmentID] = attachID if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil { klog.Error(log("failed to save volume info data: %v", err)) if err := os.RemoveAll(dataDir); err != nil { klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) return nil, err } return nil, err } klog.V(4).Info(log("mounter created successfully")) return mounter, nil } func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) kvh, ok := p.host.(volume.KubeletVolumeHost) if !ok { return nil, errors.New(log("cast from VolumeHost to KubeletVolumeHost failed")) } unmounter := &csiMountMgr{ plugin: p, podUID: podUID, specVolumeID: specName, kubeVolHost: kvh, } // load volume info from file dir := unmounter.GetPath() dataDir := path.Dir(dir) // dropoff /mount at end data, err := loadVolumeData(dataDir, volDataFileName) if err != nil { klog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err)) return nil, err } unmounter.driverName = csiDriverName(data[volDataKey.driverName]) unmounter.volumeID = data[volDataKey.volHandle] unmounter.csiClientGetter.driverName = unmounter.driverName return unmounter, nil } func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { klog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath)) volData, err := loadVolumeData(mountPath, volDataFileName) if err != nil { klog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err)) return nil, err } klog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData)) var spec *volume.Spec inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) if inlineEnabled { mode := driverMode(volData[volDataKey.driverMode]) switch { case mode == ephemeralDriverMode: spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName]) case mode == persistentDriverMode: fallthrough default: spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle]) } } else { spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle]) } return spec, nil } // constructVolSourceSpec constructs volume.Spec with CSIVolumeSource func (p *csiPlugin) constructVolSourceSpec(volSpecName, driverName string) *volume.Spec { vol := &api.Volume{ Name: volSpecName, VolumeSource: api.VolumeSource{ CSI: &api.CSIVolumeSource{ Driver: driverName, }, }, } return volume.NewSpecFromVolume(vol) } //constructPVSourceSpec constructs volume.Spec with CSIPersistentVolumeSource func (p 
*csiPlugin) constructPVSourceSpec(volSpecName, driverName, volumeHandle string) *volume.Spec { fsMode := api.PersistentVolumeFilesystem pv := &api.PersistentVolume{ ObjectMeta: meta.ObjectMeta{ Name: volSpecName, }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ CSI: &api.CSIPersistentVolumeSource{ Driver: driverName, VolumeHandle: volumeHandle, }, }, VolumeMode: &fsMode, }, } return volume.NewSpecFromPersistentVolume(pv, false) } func (p *csiPlugin) SupportsMountOption() bool { // TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags // to probe for the result for this method // (bswartz) Until the CSI spec supports probing, our only option is to // make plugins register their support for mount options or lack thereof // directly with kubernetes. return true } func (p *csiPlugin) SupportsBulkVolumeVerification() bool { return false } // volume.AttachableVolumePlugin methods var _ volume.AttachableVolumePlugin = &csiPlugin{} var _ volume.DeviceMountableVolumePlugin = &csiPlugin{} func (p *csiPlugin) NewAttacher() (volume.Attacher, error) { return p.newAttacherDetacher() } func (p *csiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) { return p.NewAttacher() } func (p *csiPlugin) NewDetacher() (volume.Detacher, error) { return p.newAttacherDetacher() } func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) { driverMode, err := p.getDriverMode(spec) if err != nil { return false, err } if driverMode == ephemeralDriverMode { klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name())) return false, nil } pvSrc, err := getCSISourceFromSpec(spec) if err != nil { return false, err } driverName := pvSrc.Driver skipAttach, err := p.skipAttach(driverName) if err != nil { return false, err } return !skipAttach, nil } // CanDeviceMount returns true if the spec supports device mount func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) { driverMode, err := p.getDriverMode(spec) if err != nil { return false, err } if driverMode == ephemeralDriverMode { klog.V(5).Info(log("plugin.CanDeviceMount skipped ephemeral mode detected for spec %v", spec.Name())) return false, nil } return true, nil } func (p *csiPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) { return p.NewDetacher() } func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { m := p.host.GetMounter(p.GetPluginName()) return m.GetMountRefs(deviceMountPath) } // skipAttach looks up CSIDriver object associated with driver name // to determine if driver requires attachment volume operation func (p *csiPlugin) skipAttach(driver string) (bool, error) { return false, nil } // getDriverMode returns the driver mode for the specified spec: {persistent|ephemeral}. // 1) If mode cannot be determined, it will default to "persistent". // 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned // See https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md func (p *csiPlugin) getDriverMode(spec *volume.Spec) (driverMode, error) { // TODO (vladimirvivien) ultimately, mode will be retrieved from CSIDriver.Spec.Mode. 
// However, in alpha version, mode is determined by the volume source: // 1) if volume.Spec.Volume.CSI != nil -> mode is ephemeral // 2) if volume.Spec.PersistentVolume.Spec.CSI != nil -> persistent volSrc, _, err := getSourceFromSpec(spec) if err != nil { return "", err } if volSrc != nil && utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) { return ephemeralDriverMode, nil } return persistentDriverMode, nil } func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) { skip, err := p.skipAttach(driver) if err != nil { return nil, err } if skip { return nil, nil } attachID := getAttachmentName(handle, driver, nodeName) // search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName attachment, err := client.StorageV1().VolumeAttachments().Get(context.Background(), attachID, meta.GetOptions{}) if err != nil { return nil, err // This err already has enough context ("VolumeAttachment xyz not found") } if attachment == nil { err = errors.New("no existing VolumeAttachment found") return nil, err } return attachment.Status.AttachmentMetadata, nil } func (p *csiPlugin) newAttacherDetacher() (*csiAttacher, error) { k8s := p.host.GetKubeClient() if k8s == nil { klog.Error(log("unable to get kubernetes client from host")) return nil, errors.New("unable to get Kubernetes client") } return &csiAttacher{ plugin: p, k8s: k8s, waitSleepTime: 1 * time.Second, }, nil } // NewController returns a csi controller instance func NewController() *Controller { return &Controller{} } func unregisterDriver(driverName string) error { csiDrivers.Delete(driverName) if err := nim.UninstallCSIDriver(driverName); err != nil { klog.Errorf("Error uninstalling CSI driver: %v", err) return err } return nil } // Return the highest supported version func highestSupportedVersion(versions []string) (*utilversion.Version, error) { if len(versions) == 0 { return nil, fmt.Errorf("CSI driver reporting empty array for supported versions") } // Sort by lowest to highest version sort.Slice(versions, func(i, j int) bool { parsedVersionI, err := utilversion.ParseGeneric(versions[i]) if err != nil { // Push bad values to the bottom return true } parsedVersionJ, err := utilversion.ParseGeneric(versions[j]) if err != nil { // Push bad values to the bottom return false } return parsedVersionI.LessThan(parsedVersionJ) }) for i := len(versions) - 1; i >= 0; i-- { highestSupportedVersion, err := utilversion.ParseGeneric(versions[i]) if err != nil { return nil, err } if highestSupportedVersion.Major() <= 1 { return highestSupportedVersion, nil } } return nil, fmt.Errorf("none of the CSI versions reported by this driver are supported") } // Only drivers that implement CSI 0.x are allowed to use deprecated socket dir. func isDeprecatedSocketDirAllowed(versions []string) bool { for _, version := range versions { if isV0Version(version) { return true } } return false } func isV0Version(version string) bool { parsedVersion, err := utilversion.ParseGeneric(version) if err != nil { return false } return parsedVersion.Major() == 0 }
1
24,189
Is it possible to import this from k8s instead of copying it in? :)
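What the reviewer is asking about, sketched. Whether it is feasible is an open question: the copy exists precisely because it modifies upstream behavior, and `k8s.io/kubernetes` is not published as a consumable module, so the import would also need `replace` directives for every k8s.io staging repo in go.mod:

package csi

// Hypothetical: reuse the upstream implementation instead of carrying a
// modified copy. Only workable if the local modifications can be dropped
// or upstreamed.
import (
	csiplugin "k8s.io/kubernetes/pkg/volume/csi"
)

// Upstream exports would then be referenced directly:
var _ = csiplugin.PluginHandler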
kubeedge-kubeedge
go
@@ -25,7 +25,7 @@ class ProjectCacheProvider extends \Psalm\Internal\Provider\ProjectCacheProvider * * @return void */ - public function processSuccessfulRun($start_time) + public function processSuccessfulRun(float $start_time) { $this->last_run = (int) $start_time; }
1
<?php namespace Psalm\Tests\Internal\Provider; use function microtime; use PhpParser; class ProjectCacheProvider extends \Psalm\Internal\Provider\ProjectCacheProvider { /** * @var int */ private $last_run = 0; public function __construct() { } public function getLastRun(): int { return $this->last_run; } /** * @param float $start_time * * @return void */ public function processSuccessfulRun($start_time) { $this->last_run = (int) $start_time; } public function canDiffFiles(): bool { return $this->last_run > 0; } public function hasLockfileChanged() : bool { return false; } public function updateComposerLockHash() : void { } }
1
9,037
`@param float` can be dropped here.
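With the native `float` type hint in place, the docblock line is redundant and the method shrinks to (sketch):

/**
 * @return void
 */
public function processSuccessfulRun(float $start_time)
{
    $this->last_run = (int) $start_time;
}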
vimeo-psalm
php
@@ -35,7 +35,7 @@ import java.util.NoSuchElementException; public abstract class CoprocessIterator<T> implements Iterator<T> { protected final TiSession session; protected final List<RegionTask> regionTasks; - protected final DAGRequest dagRequest; + protected DAGRequest dagRequest; protected final DataType[] handleTypes; // protected final ExecutorCompletionService<Iterator<SelectResponse>> completionService; protected RowReader rowReader;
1
/* * Copyright 2017 PingCAP, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * See the License for the specific language governing permissions and * limitations under the License. */ package com.pingcap.tikv.operation.iterator; import static java.util.Objects.requireNonNull; import com.pingcap.tidb.tipb.Chunk; import com.pingcap.tidb.tipb.DAGRequest; import com.pingcap.tikv.TiSession; import com.pingcap.tikv.codec.CodecDataInput; import com.pingcap.tikv.meta.TiDAGRequest; import com.pingcap.tikv.operation.SchemaInfer; import com.pingcap.tikv.row.Row; import com.pingcap.tikv.row.RowReader; import com.pingcap.tikv.row.RowReaderFactory; import com.pingcap.tikv.types.DataType; import com.pingcap.tikv.util.RangeSplitter.RegionTask; import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; public abstract class CoprocessIterator<T> implements Iterator<T> { protected final TiSession session; protected final List<RegionTask> regionTasks; protected final DAGRequest dagRequest; protected final DataType[] handleTypes; // protected final ExecutorCompletionService<Iterator<SelectResponse>> completionService; protected RowReader rowReader; protected CodecDataInput dataInput; protected boolean eof = false; protected int taskIndex; protected int chunkIndex; protected List<Chunk> chunkList; protected SchemaInfer schemaInfer; CoprocessIterator( DAGRequest req, List<RegionTask> regionTasks, TiSession session, SchemaInfer infer) { this.dagRequest = req; this.session = session; this.regionTasks = regionTasks; this.schemaInfer = infer; this.handleTypes = infer.getTypes().toArray(new DataType[] {}); } abstract void submitTasks(); /** * Build a DAGIterator from TiDAGRequest and region tasks to get rows * * <p>When we are preforming a scan request using coveringIndex, {@link * com.pingcap.tidb.tipb.IndexScan} should be used to read index rows. In other circumstances, * {@link com.pingcap.tidb.tipb.TableScan} is used to scan table rows. * * @param req TiDAGRequest built * @param regionTasks a list or RegionTask each contains a task on a single region * @param session TiSession * @return a DAGIterator to be processed */ public static CoprocessIterator<Row> getRowIterator( TiDAGRequest req, List<RegionTask> regionTasks, TiSession session) { TiDAGRequest dagRequest = req.copy(); return new DAGIterator<Row>( dagRequest.buildTableScan(), regionTasks, session, SchemaInfer.create(dagRequest), dagRequest.getPushDownType()) { @Override public Row next() { if (hasNext()) { return rowReader.readRow(schemaInfer.getTypes().toArray(new DataType[0])); } else { throw new NoSuchElementException(); } } }; } /** * Build a DAGIterator from TiDAGRequest and region tasks to get handles * * <p>When we use getHandleIterator, we must be preforming a IndexScan. 
* * @param req TiDAGRequest built * @param regionTasks a list or RegionTask each contains a task on a single region * @param session TiSession * @return a DAGIterator to be processed */ public static CoprocessIterator<Long> getHandleIterator( TiDAGRequest req, List<RegionTask> regionTasks, TiSession session) { return new DAGIterator<Long>( req.buildIndexScan(), regionTasks, session, SchemaInfer.create(req, true), req.getPushDownType()) { @Override public Long next() { if (hasNext()) { return rowReader.readRow(handleTypes).getLong(handleTypes.length - 1); } else { throw new NoSuchElementException(); } } }; } boolean tryAdvanceChunkIndex() { if (chunkList == null || chunkIndex >= chunkList.size() - 1) { return false; } chunkIndex++; return true; } void createDataInputReader() { requireNonNull(chunkList, "Chunk list should not be null."); if (0 > chunkIndex || chunkIndex >= chunkList.size()) { throw new IllegalArgumentException(); } dataInput = new CodecDataInput(chunkList.get(chunkIndex).getRowsData()); rowReader = RowReaderFactory.createRowReader(dataInput); } }
1
10,433
Maybe we can leave this change un-reverted.
pingcap-tispark
java
@@ -5,7 +5,7 @@
 package net.sourceforge.pmd.lang.java.ast;
 
-public class ASTMethodDeclarator extends AbstractJavaNode {
+public class ASTMethodDeclarator extends AbstractJavaAccessNode {
 
     public ASTMethodDeclarator(int id) {
        super(id);
    }
1
/**
 * BSD-style license; for more info see http://pmd.sourceforge.net/license.html
 */
/* Generated By:JJTree: Do not edit this line. ASTMethodDeclarator.java */

package net.sourceforge.pmd.lang.java.ast;

public class ASTMethodDeclarator extends AbstractJavaNode {

    public ASTMethodDeclarator(int id) {
        super(id);
    }

    public ASTMethodDeclarator(JavaParser p, int id) {
        super(p, id);
    }

    public int getParameterCount() {
        return this.jjtGetChild(0).jjtGetNumChildren();
    }

    /**
     * Accept the visitor. *
     */
    public Object jjtAccept(JavaParserVisitor visitor, Object data) {
        return visitor.visit(this, data);
    }
}
1
13,755
I wouldn't make MethodDeclarator an AccessNode, nor an Annotatable. It's the MethodDeclaration that can be annotated, has the info about the modifiers, and is already an AccessNode.
pmd-pmd
java
@@ -0,0 +1,16 @@
+package net.runelite.api.events.player.headicon;
+
+import lombok.Getter;
+import net.runelite.api.Player;
+import net.runelite.api.events.Event;
+
+public abstract class PlayerHeadIconChanged implements Event
+{
+	@Getter
+	private final Player player;
+
+	public PlayerHeadIconChanged(Player player)
+	{
+		this.player = player;
+	}
+}
1
1
16,367
Delete this class.
open-osrs-runelite
java
@@ -378,7 +378,7 @@ window.PopulatorView = countlyView.extend({
             if (["true", "false"].indexOf(s) !== -1) {
                 return s === "true";
             }
-            else if (/^[1-9][0-9]+|0$/.test(s)) {
+            else if (/^[1-9][0-9]+|0|[1-9]$/.test(s)) {
                 return parseInt(s);
             }
             else {
1
/*global countlyPopulator, countlyGlobal, store, countlyCommon, $, moment, app, countlyView, T, jQuery, PopulatorView, CountlyHelpers*/ window.PopulatorView = countlyView.extend({ _tab: 'populator', templateTable: undefined, templateId: undefined, rowInEdit: undefined, initialize: function() { }, beforeRender: function() { if (!this.template) { var self = this; return $.when(T.render('/populator/templates/populate.html', function(src) { self.template = src; })).then(function() {}); } }, renderTab: function() { $(".populator-tab-switcher .populator-tab-item:not([data-target='" + this._tab + "'])").removeClass("active"); $(".populator-tab-switcher .populator-tab-item[data-target='" + this._tab + "']").addClass("active"); $(".populator-tab-view:not(#" + this._tab + ")").hide(); $("#" + this._tab + "-tab").show(); }, updateTemplateSelector: function() { countlyPopulator.getTemplates(function(templates) { var templateList = []; templates.forEach(function(template) { templateList.push({name: template.name, value: template._id}); }); $(".populator-template-name.cly-select").clySelectSetItems(templateList); }); }, renderTemplatesTable: function() { var self = this; var columnsDefine = [{ mData: "name", sType: "string", sTitle: jQuery.i18n.map["populator.template"], bSortable: false }, { mData: function(row) { return row.isDefault ? $.i18n.map["populator.template-type-default"] : $.i18n.map["populator.template-type-custom"]; }, sType: "string", sTitle: jQuery.i18n.map["populator.template-type"], bSortable: false }, { mData: function(row) { return (row && row.up && Object.keys(row.up).length) || 0; }, sType: "numeric", sTitle: jQuery.i18n.map["populator.number-of-user-props"], bSortable: true }, { mData: function(row) { return (row && row.events && Object.keys(row.events).length) || 0; }, sType: "numeric", sTitle: jQuery.i18n.map["populator.number-of-events"], bSortable: true }, { mData: function(row) { return (row && row.lastEditedBy || "-"); }, sType: "string", sTitle: jQuery.i18n.map["populator.edited-by"], bSortable: false }]; columnsDefine.push({ mData: function(row) { if (!(countlyGlobal.member.admin_of && (countlyGlobal.member.admin_of.indexOf(countlyCommon.ACTIVE_APP_ID) !== -1)) && !(countlyGlobal.member.global_admin)) { return ''; } else { var editMenu = "<div class='populator-template-options-item options-item' data-id='" + row._id + "'>" + "<div class='edit-icon'></div>" + "<div class='edit-menu populator-template-menu'>"; if (row.isDefault) { editMenu += "<div class='duplicate-populator-template item' data-localize='populator.duplicate-template'><i class='fa fa-clone'></i>" + $.i18n.map["populator.duplicate-template"] + "</div>"; } else { editMenu += "<div class='edit-populator-template item' data-localize='populator.edit-template'><i class='fa fa-pencil'></i>" + $.i18n.map["populator.edit-template"] + "</div>" + "<div class='duplicate-populator-template item' data-localize='populator.duplicate-template'><i class='fa fa-clone'></i>" + $.i18n.map["populator.duplicate-template"] + "</div>" + "<div class='delete-populator-template item' data-localize='populator.delete-template'><i class='fa fa-trash'></i>" + $.i18n.map["populator.delete-template"] + "</div>"; } editMenu += "</div></div>"; } return editMenu; }, bSortable: false }); countlyPopulator.getTemplates(function(templates) { self.templateTable = $('#populator-templates-table').dataTable($.extend({}, $.fn.dataTable.defaults, { aaData: templates || [], aoColumns: columnsDefine })); var templateList = []; (templates || 
[]).forEach(function(template) { templateList.push({name: template.name, value: template._id}); }); $(".populator-template-name.cly-select").clySelectSetItems(templateList); }); $("#templates-tab").off("click", ".edit-icon").on("click", ".edit-icon", function(e) { var menu = $(e.currentTarget).parents(".populator-template-options-item").find(".edit-menu"); menu.toggle(); /* if (!menu.is(":hidden")) { setTimeout(function() { menu.find(".edit-menu").hide(); }, 5000); } */ }); $("#templates-tab").off("click", ".edit-populator-template").on("click", ".edit-populator-template", function(e) { var cell = $(e.currentTarget).parents(".populator-template-options-item"); cell.find(".edit-menu").hide(); countlyPopulator.getTemplate(cell.data("id"), function(template) { self.renderTemplateDrawer(template); self.templateId = cell.data("id"); self.rowInEdit = cell.parents("tr")[0]; $("#populator-template-drawer").addClass("open"); }); }); $("#templates-tab").off("click", ".duplicate-populator-template").on("click", ".duplicate-populator-template", function(e) { var cell = $(e.currentTarget).parents(".populator-template-options-item"); cell.find(".edit-menu").hide(); countlyPopulator.getTemplate(cell.data("id"), function(template) { self.renderTemplateDrawer(template, true); $("#populator-template-drawer").addClass("open"); }); }); $("#templates-tab").off("click", ".delete-populator-template").on("click", ".delete-populator-template", function(e) { var cell = $(e.currentTarget).parents(".populator-template-options-item"); cell.find(".edit-menu").hide(); countlyPopulator.removeTemplate(cell.data("id"), function() { self.templateTable.fnDeleteRow(cell.parents("tr")[0]); self.updateTemplateSelector(); }); }); }, renderTemplateDrawer: function(templateData, forceCreate) { var self = this; var isEditing = (typeof templateData !== "undefined") && !forceCreate; $("#drawer-title").text(isEditing ? 
$.i18n.prop("populator.edit-template", templateData && templateData.name || "") : $.i18n.prop("populator.create-template")); $("#populator-template-name").val(templateData && templateData.name || ""); $(".populator-custom-user-prop-row.header-row").hide(); $(".populator-custom-user-prop-row:not(.header-row)").remove(); $("#populator-template-events > .populator-event-row").remove(); if (isEditing) { $("#populator-template-discard-changes").show(); } else { $("#populator-template-discard-changes").hide(); } if ($("#populator-template-name").val() === "") { $("#populator-template-save").addClass("disabled"); } else { $("#populator-template-save").removeClass("disabled"); } $("#populator-template-name").off("change paste keyup").on("change paste keyup", function() { if ($("#populator-template-name").val() === "") { $("#populator-template-save").addClass("disabled"); } else { $("#populator-template-save").removeClass("disabled"); } }); if (templateData && templateData.up && Object.keys(templateData.up).length > 0) { $(".populator-custom-user-prop-row.header-row").show(); Object.keys(templateData.up).forEach(function(key) { $(".populator-custom-user-prop-row:last").after( "<div class=\"populator-custom-user-prop-row\">" + "<input class=\"input populator-custom-user-prop-key\" type=\"text\" class=\"input\" value=\"" + key + "\"/>" + "<input class=\"input populator-custom-user-prop-values\" type=\"text\" class=\"input\" value=\"" + templateData.up[key].map(function(val) { return val + ""; }).join(", ") + "\"/>" + "<div class=\"icon-button remove text-light-gray\"><i class=\"material-icons\">highlight_off</i></div>" + "</div>" ); app.localize($("#populator-template-drawer")); }); } else { $(".populator-custom-user-prop-row.header-row").hide(); } Object.keys(templateData && templateData.events || {}).forEach(function(key) { var event = templateData.events[key]; var row = "<div class=\"populator-event-row\">" + "<div class=\"populator-event-key-row\">" + "<div class=\"label\" data-localize=\"populator.event-key\"></div>" + "<input type=\"text\" class=\"input\" value=\"" + key + "\"/>" + "<div class=\"populator-template-remove-event text-link\" data-localize=\"populator.remove-event\"></div>" + "</div>" + "<div class=\"populator-event-segmentation-table\">" + "<div class=\"populator-event-segmentation-row header-row\">" + "<div class=\"label populator-event-segmentation-key\" data-localize=\"populator.segmentation-key\"></div>" + "<div class=\"label populator-event-segmentation-values\" data-localize=\"populator.segmentation-values\"></div>" + "</div>"; if (event.segments && Object.keys(event.segments).length > 0) { Object.keys(event.segments).forEach(function(segmentationKey) { row += "<div class=\"populator-event-segmentation-row\">" + "<input class=\"input populator-event-segmentation-key\" type=\"text\" class=\"input\"/ value=\"" + segmentationKey + "\">" + "<input class=\"input populator-event-segmentation-values\" type=\"text\" class=\"input\"/ value=\"" + event.segments[segmentationKey].join(", ") + "\">" + "<div class=\"icon-button remove text-light-gray\"><i class=\"material-icons\">highlight_off</i></div>" + "</div>"; }); } row += "</div><div class=\"populator-event-add-segmentation text-link\" data-localize=\"populator.add-segmentation\"></div>"; row += "<div class=\"populator-event-property populator-template-event-duration\"><div class=\"fa check-green " + (event.duration ? 
"fa-check-square" : "fa-square-o") + "\"></div><div class=\"content\"><div class=\"help-title\" data-localize=\"populator.duration-help-title\"></div><div class=\"help-subtitle\" data-localize=\"populator.duration-help-subtitle\"></div><div class=\"event-property-inputs\"><input type=\"number\" class=\"input duration-start\" value=\"" + (event.duration && event.duration[0] || "") + "\"/><span> - </span><input type=\"number\" class=\"input duration-end\" value=\"" + (event.duration && event.duration[1] || "") + "\"/></div></div></div>"; row += "<div class=\"populator-event-property populator-template-event-sum\"><div class=\"fa check-green " + (event.sum ? "fa-check-square" : "fa-square-o") + "\"></div><div class=\"content\"><div class=\"help-title\" data-localize=\"populator.sum-help-title\"></div><div class=\"help-subtitle\" data-localize=\"populator.sum-help-subtitle\"></div><div class=\"event-property-inputs\"><input type=\"number\" class=\"input sum-start\" value=\"" + (event.sum && event.sum[0] || "") + "\"/><span> - </span><input type=\"number\" class=\"input sum-end\" value=\"" + (event.sum && event.sum[1] || "") + "\"/></div></div></div>"; $("#populator-template-add-event").before(row); if (!(event.segments && Object.keys(event.segments).length > 0)) { $("#populator-template-drawer .populator-event-row:last .populator-event-segmentation-table .header-row").hide(); } }); app.localize($("#populator-template-drawer")); $("#populator-add-custom-user-prop").off("click").on("click", function() { $(".populator-custom-user-prop-row.header-row").show(); $(".populator-custom-user-prop-row:last").after( "<div class=\"populator-custom-user-prop-row\">" + "<input class=\"input populator-custom-user-prop-key\" type=\"text\" class=\"input\"/>" + "<input class=\"input populator-custom-user-prop-values\" type=\"text\" class=\"input\"/>" + "<div class=\"icon-button remove text-light-gray\"><i class=\"material-icons\">highlight_off</i></div>" + "</div>" ); app.localize($("#populator-template-drawer")); }); $("#populator-template-add-event").off("click").on("click", function() { $("#populator-template-add-event").before( "<div class=\"populator-event-row\">" + "<div class=\"populator-event-key-row\">" + "<div class=\"label\" data-localize=\"populator.event-key\"></div>" + "<input type=\"text\" class=\"input\"/>" + "<div class=\"populator-template-remove-event text-link\" data-localize=\"populator.remove-event\"></div>" + "</div>" + "<div class=\"populator-event-segmentation-table\">" + "<div class=\"populator-event-segmentation-row header-row\" style=\"display: none;\">" + "<div class=\"label populator-event-segmentation-key\" data-localize=\"populator.segmentation-key\"></div>" + "<div class=\"label populator-event-segmentation-values\" data-localize=\"populator.segmentation-values\"></div>" + "</div>" + "</div>" + "<div class=\"populator-event-add-segmentation text-link\" data-localize=\"populator.add-segmentation\"></div>" + "<div class=\"populator-event-property populator-template-event-duration\"><div class=\"fa check-green fa-square-o\"></div><div class=\"content\"><div class=\"help-title\" data-localize=\"populator.duration-help-title\"></div><div class=\"help-subtitle\" data-localize=\"populator.duration-help-subtitle\"></div><div class=\"event-property-inputs\"><input class=\"input duration-start\"/><span> - </span><input class=\"input duration-end\"/></div></div></div>" + "<div class=\"populator-event-property populator-template-event-sum\"><div class=\"fa check-green 
fa-square-o\"></div><div class=\"content\"><div class=\"help-title\" data-localize=\"populator.sum-help-title\"></div><div class=\"help-subtitle\" data-localize=\"populator.sum-help-subtitle\"></div><div class=\"event-property-inputs\"><input class=\"input sum-start\"/><span> - </span><input class=\"input sum-end\"/></div></div></div>" + "</div>" ); app.localize($("#populator-template-drawer")); }); $("#populator-template-drawer").off("click", ".populator-event-row .populator-event-add-segmentation").on("click", ".populator-event-row .populator-event-add-segmentation", function(e) { var event = $(e.currentTarget).parents(".populator-event-row"); event.find(".populator-event-segmentation-row.header-row").show(); event.find(".populator-event-segmentation-row:last").after( "<div class=\"populator-event-segmentation-row\">" + "<input class=\"input populator-event-segmentation-key\" type=\"text\" class=\"input\"/>" + "<input class=\"input populator-event-segmentation-values\" type=\"text\" class=\"input\"/>" + "<div class=\"icon-button remove text-light-gray\"><i class=\"material-icons\">highlight_off</i></div>" + "</div>" ); }); $("#populator-template-drawer").off("click", ".populator-custom-user-prop-row .remove").on("click", ".populator-custom-user-prop-row .remove", function(e) { var row = $(e.currentTarget).parents(".populator-custom-user-prop-row"); if (row.is(":nth-child(2)") && $(".populator-custom-user-prop-row").length < 3) { $(".populator-custom-user-prop-row.header-row").hide(); } row.remove(); }); $("#populator-template-drawer").off("click", ".populator-event-segmentation-row .remove").on("click", ".populator-event-segmentation-row .remove", function(e) { var row = $(e.currentTarget).parents(".populator-event-segmentation-row"); var event = $(e.currentTarget).parents(".populator-event-row"); if (row.is(":nth-child(2)") && event.find(".populator-event-segmentation-row").length < 3) { event.find(".populator-event-segmentation-row.header-row").hide(); } row.remove(); }); $("#populator-template-drawer").off("click", ".populator-template-remove-event").on("click", ".populator-template-remove-event", function(e) { $(e.currentTarget).parents(".populator-event-row").remove(); }); $("#populator-template-drawer").off("click", ".populator-event-property .check-green").on("click", ".populator-event-property .check-green", function(e) { var checkbox = $(e.currentTarget); if (checkbox.hasClass("fa-check-square")) { checkbox.removeClass("fa-check-square"); checkbox.addClass("fa-square-o"); } else { checkbox.addClass("fa-check-square"); checkbox.removeClass("fa-square-o"); } }); $("#populator-template-drawer .close").off("click").on("click", function() { $("#populator-template-drawer").removeClass("open"); }); $("#populator-template-discard-changes").off("click").on("click", function() { $("#populator-template-drawer").removeClass("open"); }); $("#create-populator-template-button").off("click").on("click", function() { self.renderTemplateDrawer(); $("#populator-template-drawer").addClass("open"); }); $("#populator-template-save").off("click").on("click", function(e) { if ($(e.currentTarget).hasClass("disabled")) { return; } if (isEditing) { countlyPopulator.editTemplate(self.templateId, self.getTemplateData(), function() { self.templateTable.fnUpdate(self.getTemplateData(self.templateId), self.rowInEdit); self.updateTemplateSelector(); }); } else { countlyPopulator.createTemplate(self.getTemplateData(), function(message) { var messageWords = message.result.split(/\s+/); 
self.templateTable.fnAddData(self.getTemplateData(messageWords[messageWords.length - 1])); self.updateTemplateSelector(); }); } $("#populator-template-drawer").removeClass("open"); }); document.onkeydown = function(evt) { evt = evt || window.event; if (("key" in evt) && (evt.key === "Escape" || evt.key === "Esc") || (evt.keyCode === 27)) { $("#populator-template-drawer").removeClass("open"); } }; }, getTemplateData: function(templateId) { var templateData = {_id: templateId}; /** * Tries to parse a string into a boolean or a number value * @param {string} s a user input word * @returns {boolean|number|string} the cast value */ function dynamicCast(s) { if (["true", "false"].indexOf(s) !== -1) { return s === "true"; } else if (/^[1-9][0-9]+|0$/.test(s)) { return parseInt(s); } else { return s + ""; } } /** * Tries to parse an array of strings into a homogeneous array of string, number or boolean values * @param {array} arr an array of user input words * @returns {array} array of cast values */ function processValues(arr) { var values = [dynamicCast(arr[0])]; var lastType = typeof values[0]; for (var i = 1; i < arr.length; i++) { var currentValue = dynamicCast(arr[i]); var currentType = typeof currentValue; if (lastType !== currentType) { return arr; } values.push(currentValue); } return values; } templateData.name = $("#populator-template-name").val(); if ($(".populator-custom-user-prop-row:not(.header-row)").length > 0) { templateData.up = {}; $(".populator-custom-user-prop-row:not(.header-row)").each(function(index, row) { templateData.up[$(row).find(".input.populator-custom-user-prop-key").val()] = processValues($(row).find(".input.populator-custom-user-prop-values").val().split(/\s*,\s*/)); }); } if ($(".populator-event-row").length > 0) { templateData.events = {}; $(".populator-event-row").each(function(index, row) { var eventKey = $(row).find(".populator-event-key-row input").val(); templateData.events[eventKey] = {}; if ($(row).find(".populator-event-segmentation-row:not(.header-row)").length > 0) { templateData.events[eventKey].segments = {}; $(row).find(".populator-event-segmentation-row:not(.header-row)").each(function(segmentationIndex, segmentationRow) { templateData.events[eventKey].segments[$(segmentationRow).find("input.populator-event-segmentation-key").val()] = processValues($(segmentationRow).find("input.populator-event-segmentation-values").val().split(/\s*,\s*/)); }); } if ($(row).find(".populator-template-event-duration .check-green").hasClass("fa-check-square")) { templateData.events[eventKey].duration = [parseInt($(row).find(".duration-start").val()) || 0, parseInt($(row).find(".duration-end").val()) || 0]; } if ($(row).find(".populator-template-event-sum .check-green").hasClass("fa-check-square")) { templateData.events[eventKey].sum = [parseFloat($(row).find(".sum-start").val()) || 0, parseFloat($(row).find(".sum-end").val()) || 0]; } }); } return templateData; }, renderCommon: function() { this.templateData = { "page-title": jQuery.i18n.map["populator.plugin-title"] }; var self = this; var now = new Date(); var fromDate = new Date(now.getTime() - 1000 * 60 * 60 * 24 * 30); var toDate = now; var maxTime = 60; var maxTimeout; $(this.el).html(this.template(this.templateData)); if (!(countlyGlobal.member.admin_of && (countlyGlobal.member.admin_of.indexOf(countlyCommon.ACTIVE_APP_ID) !== -1)) && !(countlyGlobal.member.global_admin)) { $("#create-populator-template-button").hide(); } else { $("#create-populator-template-button").show(); } if 
(countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].locked) { $("#populator-locked").show(); $("#populator > .content").hide(); } else { $("#populator-locked").hide(); $("#populator > .content").show(); } self.renderTab(); self.renderTemplatesTable(); self.renderTemplateDrawer(); $(".populator-tab-switcher .populator-tab-item").off("click").on("click", function(e) { self._tab = $(e.target).data("target"); self.renderTab(); }); var setInitialDateValues = false; setInterval(function updateDateRangeButton() { if (app.activeView === self) { if (!setInitialDateValues) { $("#populator #date-picker #date-from").datepicker("setDate", fromDate); $("#populator #date-picker #date-to").datepicker("setDate", toDate); setInitialDateValues = true; } fromDate = $("#populator #date-picker #date-from").datepicker("getDate") || fromDate; toDate = $("#populator #date-picker #date-to").datepicker("getDate") || toDate; $("#populator #selected-date").text(moment(fromDate).format("D MMM, YYYY") + " - " + moment(toDate).format("D MMM, YYYY")); } }, 500); $("#start-populate").addClass("disabled"); $(".populator-template-name.cly-select").on("cly-select-change", function() { $("#start-populate").removeClass("disabled"); }); $("#start-populate").on('click', function() { if ($("#start-populate").hasClass("disabled")) { CountlyHelpers.notify({ message: jQuery.i18n.map["populator.select-a-template-first"], type: "error" }); return; } CountlyHelpers.confirm(jQuery.i18n.map['populator.warning2'], "popStyleGreen", function(result) { if (!result) { return true; } CountlyHelpers.popup('#populator-modal', "populator_modal cly-loading"); $('.stop-populate').off('click').on('click', function(e) { e.preventDefault(); if (maxTimeout) { clearTimeout(maxTimeout); maxTimeout = null; } countlyPopulator.stopGenerating(); $('.close-dialog').trigger('click'); $("#start-populate").show(); $(".populate-bar div").stop(true); $(".populate-bar div").width(0); CountlyHelpers.confirm(jQuery.i18n.map["populator.success"], "popStyleGreen", function(dialogResult) { if (!dialogResult) { return true; } window.location = countlyGlobal.path + "/dashboard"; }, [], { image: 'populate-data', title: jQuery.i18n.map['populator.finished-confirm-title'] }); }); maxTime = parseInt($("#populate-maxtime").val()) || maxTime; maxTimeout = setTimeout(function() { countlyPopulator.stopGenerating(function() { $('.stop-populate').trigger("click"); }); }, maxTime * 1000); fromDate = $("#populator #date-picker #date-from").datepicker("getDate") || fromDate; toDate = $("#populator #date-picker #date-to").datepicker("getDate") || toDate; countlyPopulator.setStartTime(fromDate.getTime() / 1000); countlyPopulator.setEndTime(toDate.getTime() / 1000); if ($(".populator-template-name.cly-select").clySelectGetSelection()) { countlyPopulator.getTemplate($(".populator-template-name.cly-select").clySelectGetSelection(), function(template) { countlyPopulator.generateUsers(maxTime * 4, template); }); } else { countlyPopulator.generateUsers(maxTime * 4); } $("#start-populate").hide(); $(".populate-bar div").animate({width: "100%"}, maxTime * 1000); }, [ jQuery.i18n.map["populator.no-populate-data"], jQuery.i18n.map["populator.yes-populate-data"], ], { image: 'populate-data', title: jQuery.i18n.prop('populator.warning1', CountlyHelpers.appIdsToNames([countlyCommon.ACTIVE_APP_ID])) }); }); $("#populate-explain").on('click', function() { CountlyHelpers.alert(jQuery.i18n.map["populator.help"], "green"); }); if (countlyPopulator.isGenerating()) { $("#start-populate").hide(); 
$("#stop-populate").show(); countlyPopulator.generateUI(); $("#populate-from").val(moment(countlyPopulator.getStartTime() * 1000).format("YYYY-MM-DD")); $("#populate-to").val(moment(countlyPopulator.getEndTime() * 1000).format("YYYY-MM-DD")); $("#populate-from").datepicker({dateFormat: "yy-mm-dd", defaultDate: new Date(countlyPopulator.getStartTime() * 1000), constrainInput: true, maxDate: now }); $("#populate-to").datepicker({dateFormat: "yy-mm-dd", defaultDate: new Date(countlyPopulator.getEndTime() * 1000), constrainInput: true, maxDate: now }); } else { $("#populate-from").val(moment(fromDate).format("YYYY-MM-DD")); $("#populate-to").val(moment(toDate).format("YYYY-MM-DD")); $("#populate-from").datepicker({dateFormat: "yy-mm-dd", defaultDate: -30, constrainInput: true, maxDate: now }); $("#populate-to").datepicker({dateFormat: "yy-mm-dd", constrainInput: true, maxDate: now }); } app.localize(); /*if (this.state === "/autostart") { $("#start-populate").click(); }*/ }, refresh: function() {} }); //register views app.populatorView = new PopulatorView(); app.route('/manage/populate*state', 'populate', function(state) { if (countlyGlobal.member.global_admin || countlyGlobal.admin_apps[countlyCommon.ACTIVE_APP_ID]) { this.populatorView.state = state; this.renderWhenReady(this.populatorView); } else { app.navigate("/", true); } }); var start_populating = false; app.addPageScript("/manage/apps", function() { var populateApp = '<tr class="populate-demo-data">' + '<td>' + '<span data-localize="populator.demo-data"></span>' + '</td>' + '<td>' + '<label><input type="checkbox" id="populate-app-after"/>&nbsp;&nbsp;&nbsp;<span data-localize="populator.tooltip"></span></label>' + '</td>' + '</tr>'; var populateFirstApp = '<div class="add-app-input-wrapper">' + '<label class="populate-checkbox-container">' + '<input id="populate-first-app-after" type="checkbox">' + '<span class="checkmark"></span>' + $.i18n.map['populator.tooltip'] + '</label>' + '<div class="clear:both"></div><br>' + '</div>'; $("#add-new-app table .table-add").before(populateApp); $('#save-first-app-add').before(populateFirstApp); var saveBtn = store.get('first_app') ? '#save-first-app-add' : '#save-app-add'; $(saveBtn).click(function() { var isFirstApp = store.get('first_app'), isFirstAppPopulateChecked = $("#add-first-app #populate-first-app-after").is(':checked'), isNewAppPopulateChecked = $("#add-new-app table #populate-app-after").is(':checked'); if ((isFirstApp && isFirstAppPopulateChecked) || (!isFirstApp && isNewAppPopulateChecked)) { start_populating = true; setTimeout(function() { start_populating = false; }, 5000); } }); }); app.addAppManagementSwitchCallback(function() { if (start_populating) { start_populating = false; setTimeout(function() { var appId = $("#view-app-id").text(); app.switchApp(appId, function() { app.navigate("/manage/populate/autostart", true); }); }, 1000); } }); $(document).ready(function() { var style = "display:none;"; if (countlyGlobal.member.global_admin || countlyGlobal.admin_apps[countlyCommon.ACTIVE_APP_ID]) { style = ""; } app.addSubMenu("management", {code: "populate", url: "#/manage/populate", text: "populator.title", priority: 70, classes: "populator-menu", style: style}); //listen for UI app change app.addAppSwitchCallback(function(appId) { if (countlyGlobal.member.global_admin || countlyGlobal.admin_apps[appId]) { $(".populator-menu").show(); } else { $(".populator-menu").hide(); } }); });
1
13,583
Replacing `+` (1 or more) with `*` (0 or more) would've also done the trick.
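To make the precedence pitfall concrete: the anchors bind to the individual alternatives, not to the whole alternation, so `^[1-9][0-9]+|0$` can never match a single digit 1-9. A quick Python sketch of both the bug and the suggested fix (re.search approximates JS regexp.test; the variable names are illustrative only):

    import re

    old = r"^[1-9][0-9]+|0$"        # first branch needs at least two digits
    suggested = r"^[1-9][0-9]*|0$"  # '*' lets the first branch match a single digit

    for s in ("7", "10", "0"):
        print(s, bool(re.search(old, s)), bool(re.search(suggested, s)))
    # 7 False True
    # 10 True True
    # 0 True True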
Countly-countly-server
js
@@ -58,7 +58,7 @@ TDTWriter::TDTWriter(std::ostream *outStream, bool takeOwnership) {
   df_writeNames = true;
 }
 
-TDTWriter::~TDTWriter() throw() {
+TDTWriter::~TDTWriter() {
   // close the writer if it's still open:
   if (dp_ostream != nullptr) close();
 }
1
// $Id$ // // Copyright (C) 2005-2010 Greg Landrum and Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include <RDGeneral/BadFileException.h> #include <RDGeneral/FileParseException.h> #include <RDGeneral/RDLog.h> #include <GraphMol/SmilesParse/SmilesWrite.h> #include "MolWriters.h" #include "FileParsers.h" #include <fstream> #include <iostream> #include <iomanip> #include <sstream> #include <string> #include <boost/algorithm/string.hpp> namespace RDKit { TDTWriter::TDTWriter(const std::string &fileName) { if (fileName != "-") { auto *tmpStream = new std::ofstream(fileName.c_str()); if (!tmpStream || !(*tmpStream) || (tmpStream->bad())) { std::ostringstream errout; errout << "Bad output file " << fileName; throw BadFileException(errout.str()); } dp_ostream = static_cast<std::ostream *>(tmpStream); df_owner = true; } else { dp_ostream = static_cast<std::ostream *>(&std::cout); df_owner = false; } d_molid = 0; d_numDigits = 4; df_write2D = false; df_writeNames = true; } TDTWriter::TDTWriter(std::ostream *outStream, bool takeOwnership) { PRECONDITION(outStream, "null stream"); if (outStream->bad()) { throw FileParseException("Bad output stream"); } dp_ostream = outStream; df_owner = takeOwnership; d_molid = 0; d_numDigits = 4; df_write2D = false; df_writeNames = true; } TDTWriter::~TDTWriter() throw() { // close the writer if it's still open: if (dp_ostream != nullptr) close(); } void TDTWriter::setProps(const STR_VECT &propNames) { if (d_molid > 0) { BOOST_LOG(rdWarningLog) << "WARNING: Setting property list after a few " "molecules have been written\n"; } d_props = propNames; } void TDTWriter::write(const ROMol &mol, int confId) { CHECK_INVARIANT(dp_ostream, "no output stream"); // start by writing a "|" line unless this is the first line if (d_molid > 0) { (*dp_ostream) << "|\n"; } // write the molecule (*dp_ostream) << "$SMI<" << MolToSmiles(mol) << ">\n"; std::string name; if (df_writeNames && mol.getPropIfPresent(common_properties::_Name, name)) { (*dp_ostream) << "NAME<" << name << ">\n"; } // do we need to write coordinates? 
if (mol.getNumConformers()) { // get the ordering of the atoms in the output SMILES: std::vector<unsigned int> atomOrdering; mol.getProp(common_properties::_smilesAtomOutputOrder, atomOrdering); const Conformer &conf = mol.getConformer(confId); if (df_write2D) { (*dp_ostream) << "2D<"; } else { (*dp_ostream) << "3D<"; } const RDGeom::POINT3D_VECT &coords = conf.getPositions(); int nAts = atomOrdering.size(); for (int i = 0; i < nAts; i++) { (*dp_ostream) << std::setprecision(d_numDigits) << coords[atomOrdering[i]].x << ","; (*dp_ostream) << std::setprecision(d_numDigits) << coords[atomOrdering[i]].y; if (!df_write2D) { (*dp_ostream) << "," << std::setprecision(d_numDigits) << coords[atomOrdering[i]].z; } if (i != nAts - 1) (*dp_ostream) << ","; } (*dp_ostream) << ";>\n"; } // now write the properties STR_VECT_CI pi; if (d_props.size() > 0) { // check if we have any properties the user specified to write out // in which loop over them and write them out for (pi = d_props.begin(); pi != d_props.end(); pi++) { if (mol.hasProp(*pi)) { writeProperty(mol, (*pi)); } } } else { // if use did not specify any properties, write all non computed properties // out to the file STR_VECT properties = mol.getPropList(); STR_VECT compLst; mol.getPropIfPresent(RDKit::detail::computedPropName, compLst); STR_VECT_CI pi; for (pi = properties.begin(); pi != properties.end(); pi++) { // ignore any of the following properties if (((*pi) == RDKit::detail::computedPropName) || ((*pi) == common_properties::_Name) || ((*pi) == "_MolFileInfo") || ((*pi) == "_MolFileComments") || ((*pi) == common_properties::_MolFileChiralFlag)) { continue; } // check if this property is not computed if (std::find(compLst.begin(), compLst.end(), (*pi)) == compLst.end()) { writeProperty(mol, (*pi)); } } } d_molid++; } void TDTWriter::writeProperty(const ROMol &mol, const std::string &name) { PRECONDITION(dp_ostream, "no output stream"); (*dp_ostream) << name << "<"; // write the property value // FIX: we will assume for now that the desired property value is // catable to a string std::string pval; // we need to remove any line breaks in the output, replace them with spaces mol.getProp(name, pval); boost::replace_all(pval, "\n", " "); (*dp_ostream) << pval << ">\n"; } } // namespace RDKit
1
19,036
I've been meaning to fix this for a while. Thanks.
rdkit-rdkit
cpp
@@ -27,10 +27,12 @@ public class Program
     {
         using var otel = Sdk.CreateTracerProvider(b => b
             .AddActivitySource("MyCompany.MyProduct.MyLibrary")
-            .AddProcessorPipeline(pipeline =>
-            {
-                pipeline.AddProcessor(current => new MyActivityProcessor());
-            }));
+
+            // TODO: seems buggy if you remove A and B here, MyActivityProcessor(C).OnEnd is not called.
+            // TODO: should the dispose order be C, B, A or A, B C?
+            .AddProcessorPipeline(p => p.AddProcessor(current => new MyActivityProcessor("A")))
+            .AddProcessorPipeline(p => p.AddProcessor(current => new MyActivityProcessor("B")))
+            .AddProcessorPipeline(p => p.AddProcessor(current => new MyActivityProcessor("C"))));
 
         using (var activity = MyActivitySource.StartActivity("SayHello"))
         {
1
// <copyright file="Program.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System.Diagnostics; using OpenTelemetry; using OpenTelemetry.Trace; public class Program { private static readonly ActivitySource MyActivitySource = new ActivitySource( "MyCompany.MyProduct.MyLibrary"); public static void Main() { using var otel = Sdk.CreateTracerProvider(b => b .AddActivitySource("MyCompany.MyProduct.MyLibrary") .AddProcessorPipeline(pipeline => { pipeline.AddProcessor(current => new MyActivityProcessor()); })); using (var activity = MyActivitySource.StartActivity("SayHello")) { activity?.SetTag("foo", 1); activity?.SetTag("bar", "Hello, World!"); } } }
1
15,807
This is adding multiple processor pipelines. I guess you wanted to add multiple processors to the same single pipeline?
open-telemetry-opentelemetry-dotnet
.cs
@@ -11,13 +11,14 @@ from __future__ import absolute_import
 from __future__ import print_function
 from scapy.error import Scapy_Exception
 import scapy.modules.six as six
+from scapy.compat import *
 
 ###############################
 ## Direct Access dictionary ##
 ###############################
 
 
 def fixname(x):
-    if x and x[0] in "0123456789":
+    if x and str(x[0]) in "0123456789":
         x = "n_"+x
     return x.translate("________________________________________________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz_____________________________________________________________________________________________________________________________________")
1
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license

"""
Direct Access dictionary.
"""

from __future__ import absolute_import
from __future__ import print_function
from scapy.error import Scapy_Exception
import scapy.modules.six as six

###############################
## Direct Access dictionary ##
###############################


def fixname(x):
    if x and x[0] in "0123456789":
        x = "n_"+x
    return x.translate("________________________________________________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz_____________________________________________________________________________________________________________________________________")


class DADict_Exception(Scapy_Exception):
    pass


class DADict:
    def __init__(self, _name="DADict", **kargs):
        self._name=_name
        self.update(kargs)

    def fixname(self,val):
        return fixname(val)

    def __contains__(self, val):
        return val in self.__dict__

    def __getitem__(self, attr):
        return getattr(self, attr)

    def __setitem__(self, attr, val):
        return setattr(self, self.fixname(attr), val)

    def __iter__(self):
        return iter(map(lambda x_y1: x_y1[1],filter(lambda x_y: x_y[0] and x_y[0][0]!="_", self.__dict__.items())))

    def _show(self):
        for k in self.__dict__.keys():
            if k and k[0] != "_":
                print("%10s = %r" % (k,getattr(self,k)))

    def __repr__(self):
        return "<%s/ %s>" % (self._name," ".join(x for x in self.__dict__ if x and x[0]!="_"))

    def _branch(self, br, uniq=0):
        if uniq and br._name in self:
            raise DADict_Exception("DADict: [%s] already branched in [%s]" % (br._name, self._name))
        self[br._name] = br

    def _my_find(self, *args, **kargs):
        if args and self._name not in args:
            return False
        for k in kargs:
            if k not in self or self[k] != kargs[k]:
                return False
        return True

    def update(self, *args, **kwargs):
        for k, v in six.iteritems(dict(*args, **kwargs)):
            self[k] = v

    def _find(self, *args, **kargs):
        return self._recurs_find((), *args, **kargs)

    def _recurs_find(self, path, *args, **kargs):
        if self in path:
            return None
        if self._my_find(*args, **kargs):
            return self
        for o in self:
            if isinstance(o, DADict):
                p = o._recurs_find(path+(self,), *args, **kargs)
                if p is not None:
                    return p
        return None

    def _find_all(self, *args, **kargs):
        return self._recurs_find_all((), *args, **kargs)

    def _recurs_find_all(self, path, *args, **kargs):
        r = []
        if self in path:
            return r
        if self._my_find(*args, **kargs):
            r.append(self)
        for o in self:
            if isinstance(o, DADict):
                p = o._recurs_find_all(path+(self,), *args, **kargs)
                r += p
        return r

    def keys(self):
        return list(six.iterkeys(self))

    def iterkeys(self):
        return (x for x in self.__dict__ if x and x[0] != "_")

    def __len__(self):
        return len(self.__dict__)
1
10,466
If you need str(x[0]) here, you'll probably need str(x) on the line after that, I suppose. Also, shouldn't we use `raw()` here instead of `str()`?
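A minimal plain-Python sketch of that concern, assuming x can arrive as a bytes value under Python 3 (the input value is hypothetical):

    x = b"5ghz"
    try:
        x[0] in "0123456789"   # indexing bytes yields an int, so this raises
    except TypeError as err:
        print(err)             # 'in <string>' requires string as left operand, not int
    print(str(x[0]))           # '53' -- the byte's ordinal, not the digit
    print(chr(x[0]))           # '5'  -- the character the check presumably wants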
secdev-scapy
py
@@ -96,8 +96,8 @@ public class TwoPhaseCommitter {
    */
   private static final int TXN_COMMIT_BATCH_SIZE = 768 * 1024;
 
-  /** unit is second */
-  private static final long DEFAULT_BATCH_WRITE_LOCK_TTL = 3000;
+  /** unit is millisecond */
+  private static final long DEFAULT_BATCH_WRITE_LOCK_TTL = 3600000;
 
   private static final long MAX_RETRY_TIMES = 3;
1
/* * Copyright 2017 PingCAP, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * See the License for the specific language governing permissions and * limitations under the License. */ package com.pingcap.tikv; import com.google.protobuf.ByteString; import com.pingcap.tikv.codec.KeyUtils; import com.pingcap.tikv.exception.GrpcException; import com.pingcap.tikv.exception.TiBatchWriteException; import com.pingcap.tikv.region.RegionManager; import com.pingcap.tikv.region.TiRegion; import com.pingcap.tikv.txn.TxnKVClient; import com.pingcap.tikv.txn.type.BatchKeys; import com.pingcap.tikv.txn.type.ClientRPCResult; import com.pingcap.tikv.txn.type.GroupKeyResult; import com.pingcap.tikv.util.BackOffFunction; import com.pingcap.tikv.util.BackOffer; import com.pingcap.tikv.util.ConcreteBackOffer; import com.pingcap.tikv.util.Pair; import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.tikv.kvproto.Kvrpcpb; import org.tikv.kvproto.Kvrpcpb.Op; import org.tikv.kvproto.Metapb; public class TwoPhaseCommitter { public static class ByteWrapper { private byte[] bytes; public ByteWrapper(byte[] bytes) { this.bytes = bytes; } public byte[] getBytes() { return this.bytes; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ByteWrapper that = (ByteWrapper) o; return Arrays.equals(bytes, that.bytes); } @Override public int hashCode() { return Arrays.hashCode(bytes); } } public static class BytePairWrapper { private byte[] key; private byte[] value; public BytePairWrapper(byte[] key, byte[] value) { this.key = key; this.value = value; } public byte[] getKey() { return key; } public byte[] getValue() { return value; } } /** buffer spark rdd iterator data into memory */ private static final int WRITE_BUFFER_SIZE = 32 * 1024; /** * TiKV recommends each RPC packet should be less than ~1MB. We keep each packet's Key+Value size * below 768KB. 
*/ private static final int TXN_COMMIT_BATCH_SIZE = 768 * 1024; /** unit is second */ private static final long DEFAULT_BATCH_WRITE_LOCK_TTL = 3000; private static final long MAX_RETRY_TIMES = 3; private static final Logger LOG = LoggerFactory.getLogger(TwoPhaseCommitter.class); private TxnKVClient kvClient; private RegionManager regionManager; /** start timestamp of transaction which get from PD */ private final long startTs; public TwoPhaseCommitter(TiConfiguration conf, long startTime) { this.kvClient = TiSessionCache.getSession(conf).createTxnClient(); this.regionManager = kvClient.getRegionManager(); this.startTs = startTime; } public void close() throws Exception {} /** * 2pc - prewrite primary key * * @param backOffer * @param primaryKey * @param value * @return */ public void prewritePrimaryKey(BackOffer backOffer, byte[] primaryKey, byte[] value) throws TiBatchWriteException { this.doPrewritePrimaryKeyWithRetry( backOffer, ByteString.copyFrom(primaryKey), ByteString.copyFrom(value)); } private void doPrewritePrimaryKeyWithRetry(BackOffer backOffer, ByteString key, ByteString value) throws TiBatchWriteException { Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key); TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; Kvrpcpb.Mutation mutation; if (!value.isEmpty()) { mutation = Kvrpcpb.Mutation.newBuilder().setKey(key).setValue(value).setOp(Op.Put).build(); } else { mutation = Kvrpcpb.Mutation.newBuilder().setKey(key).setOp(Op.Del).build(); } List<Kvrpcpb.Mutation> mutationList = Collections.singletonList(mutation); // send rpc request to tikv server long lockTTL = getTxnLockTTL(this.startTs); ClientRPCResult prewriteResult = this.kvClient.prewrite( backOffer, mutationList, key, lockTTL, this.startTs, tiRegion, store); if (!prewriteResult.isSuccess() && !prewriteResult.isRetry()) { throw new TiBatchWriteException("prewrite primary key error", prewriteResult.getException()); } if (prewriteResult.isRetry()) { try { backOffer.doBackOff( BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException( String.format("Txn prewrite primary key failed, regionId=%s", tiRegion.getId()), prewriteResult.getException())); // re-split keys and commit again. 
this.doPrewritePrimaryKeyWithRetry(backOffer, key, value); } catch (GrpcException e) { String errorMsg = String.format( "Txn prewrite primary key error, re-split commit failed, regionId=%s, detail=%s", tiRegion.getId(), e.getMessage()); throw new TiBatchWriteException(errorMsg, e); } } LOG.debug("prewrite primary key {} successfully", KeyUtils.formatBytes(key)); } /** * 2pc - commit primary key * * @param backOffer * @param key * @return */ public void commitPrimaryKey(BackOffer backOffer, byte[] key, long commitTs) throws TiBatchWriteException { doCommitPrimaryKeyWithRetry(backOffer, ByteString.copyFrom(key), commitTs); } private void doCommitPrimaryKeyWithRetry(BackOffer backOffer, ByteString key, long commitTs) throws TiBatchWriteException { Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key); TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; ByteString[] keys = new ByteString[] {key}; // send rpc request to tikv server ClientRPCResult commitResult = this.kvClient.commit(backOffer, keys, this.startTs, commitTs, tiRegion, store); if (!commitResult.isSuccess()) { if (!commitResult.isRetry()) { throw new TiBatchWriteException("commit primary key error", commitResult.getException()); } else { backOffer.doBackOff( BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException( String.format("Txn commit primary key failed, regionId=%s", tiRegion.getId()), commitResult.getException())); // re-split keys and commit again. this.doCommitPrimaryKeyWithRetry(backOffer, key, commitTs); } } LOG.debug("commit primary key {} successfully", KeyUtils.formatBytes(key)); } /** * 2pc - prewrite secondary keys * * @param primaryKey * @param pairs * @return */ public void prewriteSecondaryKeys(byte[] primaryKey, Iterator<BytePairWrapper> pairs) throws TiBatchWriteException { Iterator<Pair<ByteString, ByteString>> byteStringKeys = new Iterator<Pair<ByteString, ByteString>>() { @Override public boolean hasNext() { return pairs.hasNext(); } @Override public Pair<ByteString, ByteString> next() { BytePairWrapper pair = pairs.next(); return new Pair<>( ByteString.copyFrom(pair.getKey()), ByteString.copyFrom(pair.getValue())); } }; doPrewriteSecondaryKeys(ByteString.copyFrom(primaryKey), byteStringKeys); } private void doPrewriteSecondaryKeys( ByteString primaryKey, Iterator<Pair<ByteString, ByteString>> pairs) throws TiBatchWriteException { int totalSize = 0; while (pairs.hasNext()) { ByteString[] keyBytes = new ByteString[WRITE_BUFFER_SIZE]; ByteString[] valueBytes = new ByteString[WRITE_BUFFER_SIZE]; int size = 0; while (size < WRITE_BUFFER_SIZE && pairs.hasNext()) { Pair<ByteString, ByteString> pair = pairs.next(); keyBytes[size] = pair.first; valueBytes[size] = pair.second; size++; } BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(BackOffer.BATCH_PREWRITE_BACKOFF); doPrewriteSecondaryKeysInBatchesWithRetry( backOffer, primaryKey, keyBytes, valueBytes, size, 0); totalSize = totalSize + size; } } private void doPrewriteSecondaryKeysInBatchesWithRetry( BackOffer backOffer, ByteString primaryKey, ByteString[] keys, ByteString[] values, int size, int level) throws TiBatchWriteException { if (keys == null || keys.length == 0 || values == null || values.length == 0 || size <= 0) { // return success return; } Map<ByteString, Kvrpcpb.Mutation> mutations = new LinkedHashMap<>(); for (int i = 0; i < size; i++) { ByteString key = keys[i]; ByteString value = values[i]; Kvrpcpb.Mutation mutation; if (!value.isEmpty()) { mutation = 
Kvrpcpb.Mutation.newBuilder().setKey(key).setValue(value).setOp(Kvrpcpb.Op.Put).build(); } else { // value can be null (table with one primary key integer column, data is encoded in key) mutation = Kvrpcpb.Mutation.newBuilder().setKey(key).setOp(Kvrpcpb.Op.Del).build(); } mutations.put(key, mutation); } // groups keys by region GroupKeyResult groupResult = this.groupKeysByRegion(keys, size); List<BatchKeys> batchKeyList = new LinkedList<>(); Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult(); for (Pair<TiRegion, Metapb.Store> pair : groupKeyMap.keySet()) { TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; this.appendBatchBySize(batchKeyList, tiRegion, store, groupKeyMap.get(pair), true, mutations); } // For prewrite, stop sending other requests after receiving first error. for (BatchKeys batchKeys : batchKeyList) { TiRegion oldRegion = batchKeys.getRegion(); TiRegion currentRegion = this.regionManager.getRegionById(oldRegion.getId()); if (oldRegion.equals(currentRegion)) { doPrewriteSecondaryKeySingleBatchWithRetry(backOffer, primaryKey, batchKeys, mutations); } else { if (level > MAX_RETRY_TIMES) { throw new TiBatchWriteException( String.format( "> max retry number %s, oldRegion=%s, currentRegion=%s", MAX_RETRY_TIMES, oldRegion, currentRegion)); } LOG.debug( String.format( "oldRegion=%s != currentRegion=%s, will refetch region info and retry", oldRegion, currentRegion)); retryPrewriteBatch(backOffer, primaryKey, batchKeys, mutations, level <= 0 ? 1 : level + 1); } } } private void retryPrewriteBatch( BackOffer backOffer, ByteString primaryKey, BatchKeys batchKeys, Map<ByteString, Kvrpcpb.Mutation> mutations, int level) { int size = batchKeys.getKeys().size(); ByteString[] keyBytes = new ByteString[size]; ByteString[] valueBytes = new ByteString[size]; int i = 0; for (ByteString k : batchKeys.getKeys()) { keyBytes[i] = k; valueBytes[i] = mutations.get(k).getValue(); i++; } doPrewriteSecondaryKeysInBatchesWithRetry( backOffer, primaryKey, keyBytes, valueBytes, size, level); } private void doPrewriteSecondaryKeySingleBatchWithRetry( BackOffer backOffer, ByteString primaryKey, BatchKeys batchKeys, Map<ByteString, Kvrpcpb.Mutation> mutations) throws TiBatchWriteException { LOG.debug("start prewrite secondary key, size={}", batchKeys.getKeys().size()); List<ByteString> keyList = batchKeys.getKeys(); int batchSize = keyList.size(); List<Kvrpcpb.Mutation> mutationList = new ArrayList<>(batchSize); for (ByteString key : keyList) { mutationList.add(mutations.get(key)); } // send rpc request to tikv server int txnSize = batchKeys.getKeys().size(); long lockTTL = getTxnLockTTL(this.startTs, txnSize); ClientRPCResult prewriteResult = this.kvClient.prewrite( backOffer, mutationList, primaryKey, lockTTL, this.startTs, batchKeys.getRegion(), batchKeys.getStore()); if (!prewriteResult.isSuccess() && !prewriteResult.isRetry()) { throw new TiBatchWriteException( "prewrite secondary key error", prewriteResult.getException()); } if (prewriteResult.isRetry()) { LOG.debug("prewrite secondary key fail, will backoff and retry"); try { backOffer.doBackOff( BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException( String.format( "Txn prewrite secondary key SingleBatch failed, regionId=%s", batchKeys.getRegion().getId()), prewriteResult.getException())); // re-split keys and commit again. 
retryPrewriteBatch(backOffer, primaryKey, batchKeys, mutations, 0); } catch (GrpcException e) { String errorMsg = String.format( "Txn prewrite secondary key SingleBatch error, re-split commit failed, regionId=%s, detail=%s", batchKeys.getRegion().getId(), e.getMessage()); throw new TiBatchWriteException(errorMsg, e); } } LOG.debug("prewrite secondary key successfully, size={}", batchKeys.getKeys().size()); } private void appendBatchBySize( List<BatchKeys> batchKeyList, TiRegion tiRegion, Metapb.Store store, List<ByteString> keys, boolean sizeIncludeValue, Map<ByteString, Kvrpcpb.Mutation> mutations) { int start; int end; int len = keys.size(); for (start = 0; start < len; start = end) { int size = 0; for (end = start; end < len && size < TXN_COMMIT_BATCH_SIZE; end++) { if (sizeIncludeValue) { size += this.keyValueSize(keys.get(end), mutations); } else { size += this.keySize(keys.get(end)); } } BatchKeys batchKeys = new BatchKeys(tiRegion, store, keys.subList(start, end)); batchKeyList.add(batchKeys); } } private long keyValueSize(ByteString key, Map<ByteString, Kvrpcpb.Mutation> mutations) { long size = key.size(); Kvrpcpb.Mutation mutation = mutations.get(key); if (mutation != null) { size += mutation.getValue().toByteArray().length; } return size; } private long keySize(ByteString key) { return key.size(); } /** * 2pc - commit secondary keys * * @param keys * @param commitTs * @return */ public void commitSecondaryKeys(Iterator<ByteWrapper> keys, long commitTs) throws TiBatchWriteException { Iterator<ByteString> byteStringKeys = new Iterator<ByteString>() { @Override public boolean hasNext() { return keys.hasNext(); } @Override public ByteString next() { return ByteString.copyFrom(keys.next().bytes); } }; doCommitSecondaryKeys(byteStringKeys, commitTs); } private void doCommitSecondaryKeys(Iterator<ByteString> keys, long commitTs) throws TiBatchWriteException { LOG.debug("start commit secondary key"); int totalSize = 0; while (keys.hasNext()) { ByteString[] keyBytes = new ByteString[WRITE_BUFFER_SIZE]; int size = 0; for (int i = 0; i < WRITE_BUFFER_SIZE; i++) { if (keys.hasNext()) { keyBytes[size] = keys.next(); size++; } else { break; } } totalSize = totalSize + size; BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(BackOffer.BATCH_COMMIT_BACKOFF); doCommitSecondaryKeys(backOffer, keyBytes, size, commitTs); } LOG.debug("commit secondary key successfully, total size={}", totalSize); } private void doCommitSecondaryKeys( BackOffer backOffer, ByteString[] keys, int size, long commitTs) throws TiBatchWriteException { if (keys == null || keys.length == 0 || size <= 0) { return; } // groups keys by region GroupKeyResult groupResult = this.groupKeysByRegion(keys, size); List<BatchKeys> batchKeyList = new LinkedList<>(); Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult(); for (Pair<TiRegion, Metapb.Store> pair : groupKeyMap.keySet()) { TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; this.appendBatchBySize(batchKeyList, tiRegion, store, groupKeyMap.get(pair), false, null); } // For prewrite, stop sending other requests after receiving first error. 
for (BatchKeys batchKeys : batchKeyList) { doCommitSecondaryKeySingleBatch(backOffer, batchKeys, commitTs); } } private void doCommitSecondaryKeySingleBatch( BackOffer backOffer, BatchKeys batchKeys, long commitTs) throws TiBatchWriteException { List<ByteString> keysCommit = batchKeys.getKeys(); ByteString[] keys = new ByteString[keysCommit.size()]; keysCommit.toArray(keys); // send rpc request to tikv server ClientRPCResult commitResult = this.kvClient.commit( backOffer, keys, this.startTs, commitTs, batchKeys.getRegion(), batchKeys.getStore()); if (!commitResult.isSuccess()) { String error = String.format("Txn commit secondary key error, regionId=%s", batchKeys.getRegion()); LOG.warn(error); throw new TiBatchWriteException("commit secondary key error", commitResult.getException()); } LOG.debug("commit {} rows successfully", batchKeys.getKeys().size()); } private GroupKeyResult groupKeysByRegion(ByteString[] keys, int size) throws TiBatchWriteException { Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groups = new HashMap<>(); int index = 0; try { for (; index < size; index++) { ByteString key = keys[index]; Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key); if (pair != null) { groups.computeIfAbsent(pair, e -> new LinkedList<>()).add(key); } } } catch (Exception e) { throw new TiBatchWriteException("Txn groupKeysByRegion error", e); } GroupKeyResult result = new GroupKeyResult(); result.setGroupsResult(groups); return result; } private long getTxnLockTTL(long startTime) { // TODO: calculate txn lock ttl return DEFAULT_BATCH_WRITE_LOCK_TTL; } private long getTxnLockTTL(long startTime, int txnSize) { // TODO: calculate txn lock ttl return DEFAULT_BATCH_WRITE_LOCK_TTL; } }
1
10,590
3.6 seconds? What does it stand for?
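For reference, a quick unit check on the patched constant: 3,600,000 ms / 1,000 = 3,600 s, i.e. the new default lock TTL works out to one hour.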
pingcap-tispark
java
@@ -834,6 +834,19 @@ module Beaker
        sign_certificate_for(default)
      end
 
+      # Get a facter fact from a provided host
+      #
+      # @param [Host] host The host to query the fact for
+      # @param [String] name The name of the fact to query for
+      # @!macro common_opts
+      #
+      # @returns String The value of the fact 'name' on the provided host
+      # @raise [FailTest] Raises an exception if call to facter fails
+      def fact host, name, opts= {}
+        result = on host, facter(name, opts)
+        result.stdout.chomp
+      end
+
     end
   end
 end
1
require 'resolv' require 'inifile' require 'timeout' require 'beaker/dsl/outcomes' module Beaker module DSL # This is the heart of the Puppet Acceptance DSL. Here you find a helper # to proxy commands to hosts, more commands to move files between hosts # and execute remote scripts, confine test cases to certain hosts and # prepare the state of a test case. # # To mix this is into a class you need the following: # * a method *hosts* that yields any hosts implementing # {Beaker::Host}'s interface to act upon. # * a method *logger* that yields a logger implementing # {Beaker::Logger}'s interface. # * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing # {Beaker::Host}'s interface to act upon # * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation # # # @api dsl module Helpers # @!macro common_opts # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Boolean] :silent (false) Do not produce log output # @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array # (or range) of integer exit codes that should be considered # acceptable. An error will be thrown if the exit code does not # match one of the values in this list. # @option opts [Hash{String=>String}] :environment ({}) These will be # treated as extra environment variables that should be set before # running the command. # # The primary method for executing commands *on* some set of hosts. # # @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon, # or a role (String or Symbol) that identifies one or more hosts. # @param [String, Command] command The command to execute on *host*. # @param [Proc] block Additional actions or assertions. # @!macro common_opts # # @example Most basic usage # on hosts, 'ls /tmp' # # @example Allowing additional exit codes to pass # on agents, 'puppet agent -t', :acceptable_exit_codes => [0,2] # # @example Using the returned result for any kind of checking # if on(host, 'ls -la ~').stdout =~ /\.bin/ # ...do some action... # end # # @example Using TestCase helpers from within a test. # agents.each do |agent| # on agent, 'cat /etc/puppet/puppet.conf' do # assert_match stdout, /server = #{master}/, 'WTF Mate' # end # end # # @example Using a role (defined in a String) to identify the host # on "master", "echo hello" # # @example Using a role (defined in a Symbol) to identify the host # on :dashboard, "echo hello" # # @return [Result] An object representing the outcome of *command*. # @raise [FailTest] Raises an exception if *command* obviously fails. def on(host, command, opts = {}, &block) unless command.is_a? Command cmd_opts = opts[:environment] ? { 'ENV' => opts.delete(:environment) } : Hash.new command = Command.new(command.to_s, [], cmd_opts) end if host.is_a? String or host.is_a? Symbol host = hosts_as(host) #check by role end if host.is_a? Array host.map { |h| on h, command, opts, &block } else @result = host.exec(command, opts) # Also, let additional checking be performed by the caller. yield self if block_given? return @result end end # The method for executing commands on the default host # # @param [String, Command] command The command to execute on *host*. # @param [Proc] block Additional actions or assertions. 
# @!macro common_opts # # @example Most basic usage # shell 'ls /tmp' # # @example Allowing additional exit codes to pass # shell 'puppet agent -t', :acceptable_exit_codes => [0,2] # # @example Using the returned result for any kind of checking # if shell('ls -la ~').stdout =~ /\.bin/ # ...do some action... # end # # @example Using TestCase helpers from within a test. # agents.each do |agent| # shell('cat /etc/puppet/puppet.conf') do |result| # assert_match result.stdout, /server = #{master}/, 'WTF Mate' # end # end # # @return [Result] An object representing the outcome of *command*. # @raise [FailTest] Raises an exception if *command* obviously fails. def shell(command, opts = {}, &block) on(default, command, opts, &block) end # @deprecated # An proxy for the last {Beaker::Result#stdout} returned by # a method that makes remote calls. Use the {Beaker::Result} # object returned by the method directly instead. For Usage see # {Beaker::Result}. def stdout return nil if @result.nil? @result.stdout end # @deprecated # An proxy for the last {Beaker::Result#stderr} returned by # a method that makes remote calls. Use the {Beaker::Result} # object returned by the method directly instead. For Usage see # {Beaker::Result}. def stderr return nil if @result.nil? @result.stderr end # @deprecated # An proxy for the last {Beaker::Result#exit_code} returned by # a method that makes remote calls. Use the {Beaker::Result} # object returned by the method directly instead. For Usage see # {Beaker::Result}. def exit_code return nil if @result.nil? @result.exit_code end # Move a file from a remote to a local path # @note If using {Beaker::Host} for the hosts *scp* is not # required on the system as it uses Ruby's net/scp library. The # net-scp gem however is required (and specified in the gemspec). # # @param [Host, #do_scp_from] host One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_from}. # @param [String] from_path A remote path to a file. # @param [String] to_path A local path to copy *from_path* to. # @!macro common_opts # # @return [Result] Returns the result of the SCP operation def scp_from host, from_path, to_path, opts = {} if host.is_a? Array host.each { |h| scp_from h, from_path, to_path, opts } else @result = host.do_scp_from(from_path, to_path, opts) @result.log logger end end # Move a local file to a remote host # @note If using {Beaker::Host} for the hosts *scp* is not # required on the system as it uses Ruby's net/scp library. The # net-scp gem however is required (and specified in the gemspec. # # @param [Host, #do_scp_to] host One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_to}. # @param [String] from_path A local path to a file. # @param [String] to_path A remote path to copy *from_path* to. # @!macro common_opts # # @return [Result] Returns the result of the SCP operation def scp_to host, from_path, to_path, opts = {} if host.is_a? Array host.each { |h| scp_to h, from_path, to_path, opts } else @result = host.do_scp_to(from_path, to_path, opts) @result.log logger end end # Check to see if a package is installed on a remote host # # @param [Host] host A host object # @param [String] package_name Name of the package to check for. 
# # @return [Boolean] true/false if the package is found def check_for_package host, package_name host.check_for_package package_name end # Install a package on a host # # @param [Host] host A host object # @param [String] package_name Name of the package to install # # @return [Result] An object representing the outcome of *install command*. def install_package host, package_name host.install_package package_name end # Create a remote file out of a string # @note This method uses Tempfile in Ruby's STDLIB as well as {#scp_to}. # # @param [Host, #do_scp_to] hosts One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_from}. # @param [String] file_path A remote path to place *file_content* at. # @param [String] file_content The contents of the file to be placed. # @!macro common_opts # # @return [Result] Returns the result of the underlying SCP operation. def create_remote_file(hosts, file_path, file_content, opts = {}) Tempfile.open 'beaker' do |tempfile| File.open(tempfile.path, 'w') {|file| file.puts file_content } scp_to hosts, tempfile.path, file_path, opts end end # Move a local script to a remote host and execute it # @note this relies on {#on} and {#scp_to} # # @param [Host, #do_scp_to] host One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_from}. # @param [String] script A local path to find an executable script at. # @!macro common_opts # @param [Proc] block Additional tests to run after script has executed # # @return [Result] Returns the result of the underlying SCP operation. def run_script_on(host, script, opts = {}, &block) # this is unsafe as it uses the File::SEPARATOR will be set to that # of the coordinator node. This works for us because we use cygwin # which will properly convert the paths. Otherwise this would not # work for running tests on a windows machine when the coordinator # that the harness is running on is *nix. We should use # {Beaker::Host#temp_path} instead. TODO remote_path = File.join("", "tmp", File.basename(script)) scp_to host, script, remote_path on host, remote_path, opts, &block end # Move a local script to default host and execute it # @see #run_script_on def run_script(script, opts = {}, &block) run_script_on(default, script, opts, &block) end # Limit the hosts a test case is run against # @note This will modify the {Beaker::TestCase#hosts} member # in place unless an array of hosts is passed into it and # {Beaker::TestCase#logger} yielding an object that responds # like {Beaker::Logger#warn}, as well as # {Beaker::DSL::Outcomes#skip_test}, and optionally # {Beaker::TestCase#hosts}. # # @param [Symbol] type The type of confinement to do. Valid parameters # are *:to* to confine the hosts to only those that # match *criteria* or *:except* to confine the test # case to only those hosts that do not match # criteria. # @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}] # criteria Specify the criteria with which a host should be # considered for inclusion or exclusion. The key is any attribute # of the host that will be yielded by {Beaker::Host#[]}. # The value can be any string/regex or array of strings/regexp. # The values are compared using [Enumerable#any?] so that if one # value of an array matches the host is considered a match for that # criteria. # @param [Array<Host>] host_array This creatively named parameter is # an optional array of hosts to confine to. If not passed in, this # method will modify {Beaker::TestCase#hosts} in place. 
# @param [Proc] block Addition checks to determine suitability of hosts # for confinement. Each host that is still valid after checking # *criteria* is then passed in turn into this block. The block # should return true if the host matches this additional criteria. # # @example Basic usage to confine to debian OSes. # confine :to, :platform => 'debian' # # @example Confining to anything but Windows and Solaris # confine :except, :platform => ['windows', 'solaris'] # # @example Using additional block to confine to Solaris global zone. # confine :to, :platform => 'solaris' do |solaris| # on( solaris, 'zonename' ) =~ /global/ # end # # @return [Array<Host>] Returns an array of hosts that are still valid # targets for this tests case. # @raise [SkipTest] Raises skip test if there are no valid hosts for # this test case after confinement. def confine(type, criteria, host_array = nil, &block) provided_hosts = host_array ? true : false hosts_to_modify = host_array || hosts criteria.each_pair do |property, value| case type when :except hosts_to_modify = hosts_to_modify.reject do |host| inspect_host host, property, value end if block_given? hosts_to_modify = hosts_to_modify.reject do |host| yield host end end when :to hosts_to_modify = hosts_to_modify.select do |host| inspect_host host, property, value end if block_given? hosts_to_modify = hosts_to_modify.select do |host| yield host end end else raise "Unknown option #{type}" end end if hosts_to_modify.empty? logger.warn "No suitable hosts with: #{criteria.inspect}" skip_test 'No suitable hosts found' end self.hosts = hosts_to_modify hosts_to_modify end # Ensures that host restrictions as specifid by type, criteria and # host_array are confined to activity within the passed block. # TestCase#hosts is reset after block has executed. # # @see #confine def confine_block(type, criteria, host_array = nil, &block) begin original_hosts = self.hosts.dup confine(type, criteria, host_array) yield ensure self.hosts = original_hosts end end # @!visibility private def inspect_host(host, property, one_or_more_values) values = Array(one_or_more_values) return values.any? do |value| true_false = false case value when String true_false = host[property.to_s].include? value when Regexp true_false = host[property.to_s] =~ value end true_false end end # Test Puppet running in a certain run mode with specific options. # This ensures the following steps are performed: # 1. The pre-test Puppet configuration is backed up # 2. A new Puppet configuraton file is layed down # 3. Puppet is started or restarted in the specified run mode # 4. Ensure Puppet has started correctly # 5. Further tests are yielded to # 6. Revert Puppet to the pre-test state # 7. Testing artifacts are saved in a folder named for the test # # @param [Host] host One object that act like Host # # @param [Hash{Symbol=>String}] conf_opts Represents puppet settings. # Sections of the puppet.conf may be # specified, if no section is specified the # a puppet.conf file will be written with the # options put in a section named after [mode] # # There is a special setting for command_line # arguments such as --debug or --logdest, which # cannot be set in puppet.conf. For example: # # :__commandline_args__ => '--logdest /tmp/a.log' # # These will only be applied when starting a FOSS # master, as a pe master is just bounced. # # @param [File] testdir The temporary directory which will hold backup # configuration, and other test artifacts. 
# # @param [Block] block The point of this method, yields so # tests may be ran. After the block is finished # puppet will revert to a previous state. # # @example A simple use case to ensure a master is running # with_puppet_running_on( master ) do # ...tests that require a master... # end # # @example Fully utilizing the possiblities of config options # with_puppet_running_on( master, # :main => {:logdest => '/var/blah'}, # :master => {:masterlog => '/elswhere'}, # :agent => {:server => 'localhost'} ) do # # ...tests to be ran... # end # # @api dsl def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash) cmdline_args = conf_opts.delete(:__commandline_args__) begin backup_file = backup_the_file(host, host['puppetpath'], testdir, 'puppet.conf') lay_down_new_puppet_conf host, conf_opts, testdir if host.is_pe? bounce_service( host, 'pe-httpd' ) else puppet_master_started = start_puppet_from_source_on!( host, cmdline_args ) end yield self if block_given? rescue Exception => early_exception original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n") raise(original_exception) ensure begin restore_puppet_conf_from_backup( host, backup_file ) if host.is_pe? bounce_service( host, 'pe-httpd' ) else stop_puppet_from_source_on( host ) if puppet_master_started end rescue Exception => teardown_exception if original_exception logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n") raise original_exception else raise teardown_exception end end end end # Test Puppet running in a certain run mode with specific options, # on the default host # @api dsl # @see #with_puppet_running_on def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block with_puppet_running_on(default, conf_opts, testdir, &block) end # @!visibility private def restore_puppet_conf_from_backup( host, backup_file ) puppetpath = host['puppetpath'] host.exec( Command.new( "if [ -f #{backup_file} ]; then " + "cat #{backup_file} > " + "#{puppetpath}/puppet.conf; " + "rm -f #{backup_file}; " + "fi" ) ) end # @!visibility private def backup_the_file host, current_dir, new_dir, filename = 'puppet.conf' old_location = current_dir + '/' + filename new_location = new_dir + '/' + filename + '.bak' host.exec( Command.new( "cp #{old_location} #{new_location}" ) ) return new_location end # @!visibility private def start_puppet_from_source_on! 
host, args = '' host.exec( puppet( 'master', args ) ) logger.debug 'Waiting for the puppet master to start' unless port_open_within?( host, 8140, 10 ) dump_puppet_log(host) raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion' end logger.debug 'The puppet master has started' return true end # @!visibility private def stop_puppet_from_source_on( host ) pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp host.exec( Command.new( "kill #{pid}" ) ) Timeout.timeout(10) do while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do # until kill -0 finds no process and we know that puppet has finished cleaning up sleep 1 end end rescue RuntimeError => e dump_puppet_log host raise e end # @!visibility private def dump_puppet_log(host) syslogfile = case host['platform'] when /fedora|centos|el/ then '/var/log/messages' when /ubuntu|debian/ then '/var/log/syslog' else return end logger.notify "\n*************************" logger.notify "* Dumping master log *" logger.notify "*************************" host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1]) logger.notify "*************************\n" end # @!visibility private def lay_down_new_puppet_conf( host, configuration_options, testdir ) new_conf = puppet_conf_for( host, configuration_options ) create_remote_file host, "#{testdir}/puppet.conf", new_conf.to_s host.exec( Command.new( "cat #{testdir}/puppet.conf > #{host['puppetpath']}/puppet.conf" ), :silent => true ) host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ) end # @!visibility private def puppet_conf_for host, conf_opts puppetconf = host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ).stdout new_conf = IniFile.new( puppetconf ).merge( conf_opts ) new_conf end # @!visibility private def bounce_service host, service # Any reason to not # host.exec puppet_resource( 'service', service, 'ensure=stopped' ) # host.exec puppet_resource( 'service', service, 'ensure=running' ) host.exec( Command.new( "/etc/init.d/#{service} restart" ) ) end # Blocks until the port is open on the host specified, returns false # on failure def port_open_within?( host, port = 8140, seconds = 120 ) repeat_for( seconds ) do host.port_open?( port ) end end # Runs 'puppet apply' on a remote host, piping manifest through stdin # # @param [Host] host The host that this command should be run on # # @param [String] manifest The puppet manifest to apply # # @!macro common_opts # @option opts [Boolean] :parseonly (false) If this key is true, the # "--parseonly" command line parameter will # be passed to the 'puppet apply' command. # # @option opts [Boolean] :trace (false) If this key exists in the Hash, # the "--trace" command line parameter will be # passed to the 'puppet apply' command. # # @option opts [Boolean] :catch_failures (false) By default # "puppet --apply" will exit with 0, # which does not count as a test # failure, even if there were errors applying # the manifest. This option enables detailed # exit codes and causes a test failure if # "puppet --apply" indicates there was a # failure during its execution. # # @param [Block] block This method will yield to a block of code passed # by the caller; this can be used for additional # validation, etc. 
# def apply_manifest_on(host, manifest, opts = {}, &block) on_options = {:stdin => manifest + "\n"} on_options[:acceptable_exit_codes] = opts.delete(:acceptable_exit_codes) args = ["--verbose"] args << "--parseonly" if opts[:parseonly] args << "--trace" if opts[:trace] if opts[:catch_failures] args << '--detailed-exitcodes' # From puppet help: # "... an exit code of '2' means there were changes, an exit code of # '4' means there were failures during the transaction, and an exit # code of '6' means there were both changes and failures." # We're after failures specifically so catch exit codes 4 and 6 only. on_options[:acceptable_exit_codes] |= [0, 2] end # Not really thrilled with this implementation, might want to improve it # later. Basically, there is a magic trick in the constructor of # PuppetCommand which allows you to pass in a Hash for the last value in # the *args Array; if you do so, it will be treated specially. So, here # we check to see if our caller passed us a hash of environment variables # that they want to set for the puppet command. If so, we set the final # value of *args to a new hash with just one entry (the value of which # is our environment variables hash) if opts.has_key?(:environment) args << { :environment => opts[:environment]} end on host, puppet( 'apply', *args), on_options, &block end # Runs 'puppet apply' on default host, piping manifest through stdin # @see #apply_manifest_on def apply_manifest(manifest, opts = {}, &block) apply_manifest_on(default, manifest, opts, &block) end # @deprecated def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test', options={}, &block) if host.is_a? Array host.each { |h| run_agent_on h, arg, options, &block } else on host, puppet_agent(arg), options, &block end end # FIX: this should be moved into host/platform # @visibility private def run_cron_on(host, action, user, entry="", &block) platform = host['platform'] if platform.include?('solaris') || platform.include?('aix') then case action when :list then args = '-l' when :remove then args = '-r' when :add on( host, "echo '#{entry}' > /var/spool/cron/crontabs/#{user}", &block ) end else # default for GNU/Linux platforms case action when :list then args = '-l -u' when :remove then args = '-r -u' when :add on( host, "echo '#{entry}' > /tmp/#{user}.cron && " + "crontab -u #{user} /tmp/#{user}.cron", &block ) end end if args case action when :list, :remove then on(host, "crontab #{args} #{user}", &block) end end end # This method accepts a block and using the puppet resource 'host' will # setup host aliases before and after that block. # # A teardown step is also added to make sure unstubbing of the host is # removed always. 
# # @param machine [String] the host to execute this stub # @param ip_spec [Hash{String=>String}] a hash containing the host to ip # mappings # @example Stub puppetlabs.com on the master to 127.0.0.1 # stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1') def stub_hosts_on(machine, ip_spec) ip_spec.each do |host, ip| logger.notify("Stubbing host #{host} to IP #{ip} on machine #{machine}") on( machine, puppet('resource', 'host', host, 'ensure=present', "ip=#{ip}") ) end teardown do ip_spec.each do |host, ip| logger.notify("Unstubbing host #{host} to IP #{ip} on machine #{machine}") on( machine, puppet('resource', 'host', host, 'ensure=absent') ) end end end # This method accepts a block and using the puppet resource 'host' will # setup host aliases before and after that block on the default host # # @example Stub puppetlabs.com on the default host to 127.0.0.1 # stub_hosts('puppetlabs.com' => '127.0.0.1') # @see #stub_hosts_on def stub_hosts(ip_spec) stub_hosts_on(default, ip_spec) end # This wraps the method `stub_hosts_on` and makes the stub specific to # the forge alias. # # @param machine [String] the host to perform the stub on def stub_forge_on(machine) @forge_ip ||= Resolv.getaddress(forge) stub_hosts_on(machine, 'forge.puppetlabs.com' => @forge_ip) end # This wraps the method `stub_hosts` and makes the stub specific to # the forge alias. # # @see #stub_forge_on def stub_forge stub_forge_on(default) end def sleep_until_puppetdb_started(host) curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120) curl_with_retries("start puppetdb (ssl)", host, "https://#{host.node_name}:8081", [35, 60]) end def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1) retry_command(desc, host, "curl #{url}", desired_exit_codes, max_retries, retry_interval) end def retry_command(desc, host, command, desired_exit_codes = 0, max_retries = 60, retry_interval = 1) desired_exit_codes = [desired_exit_codes].flatten result = on host, command, :acceptable_exit_codes => (0...127) num_retries = 0 until desired_exit_codes.include?(result.exit_code) sleep retry_interval result = on host, command, :acceptable_exit_codes => (0...127) num_retries += 1 if (num_retries > max_retries) fail("Unable to #{desc}") end end end #stops the puppet agent running on the host def stop_agent_on(agent) vardir = agent.puppet['vardir'] agent_running = true while agent_running result = on agent, "[ -e '#{vardir}/state/agent_catalog_run.lock' ]", :acceptable_exit_codes => [0,1] agent_running = (result.exit_code == 0) sleep 2 unless agent_running end if agent['platform'].include?('solaris') on(agent, '/usr/sbin/svcadm disable -s svc:/network/pe-puppet:default') elsif agent['platform'].include?('aix') on(agent, '/usr/bin/stopsrc -s pe-puppet') elsif agent['platform'].include?('windows') on(agent, 'net stop pe-puppet', :acceptable_exit_codes => [0,2]) else # For the sake of not passing the PE version into this method, # we just query the system to find out which service we want to # stop result = on agent, "[ -e /etc/init.d/pe-puppet-agent ]", :acceptable_exit_codes => [0,1] service = (result.exit_code == 0) ? 
'pe-puppet-agent' : 'pe-puppet' on(agent, "/etc/init.d/#{service} stop") end end #stops the puppet agent running on the default host # @see #stop_agent_on def stop_agent stop_agent_on(default) end #wait for a given host to appear in the dashboard def wait_for_host_in_dashboard(host) hostname = host.node_name retry_command("Wait for #{hostname} to be in the console", dashboard, "! curl --sslv3 -k -I https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'") end # Ensure the host has requested a cert, then sign it # # @param [Host] host The host to sign for # # @returns nil # @raise [FailTest] if process times out def sign_certificate_for(host) if [master, dashboard, database].include? host on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2] on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24] else hostname = Regexp.escape host.node_name last_sleep = 0 next_sleep = 1 (0..10).each do |i| fail_test("Failed to sign cert for #{hostname}") if i == 10 on master, puppet("cert --sign --all"), :acceptable_exit_codes => [0,24] break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/ sleep next_sleep (last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep end end end #prompt the master to sign certs then check to confirm the cert for the default host is signed #@see #sign_certificate_for def sign_certificate sign_certificate_for(default) end end end end
1
4768
If the command fails, is stdout nil or ""?
voxpupuli-beaker
rb
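
The reviewer's question above targets `result.stdout.chomp` in the new `fact` helper: if a failed facter call hands back nil rather than "", `nil.chomp` raises NoMethodError in Ruby. Below is a minimal sketch of the defensive shape, written in Python with a hypothetical Result stand-in (the Ruby helper's actual return object is Beaker::Result); `rstrip("\n")` approximates chomp by trimming trailing newlines.

from dataclasses import dataclass
from typing import Optional

@dataclass
class Result:
    stdout: Optional[str]   # may be None on failure, per the reviewer's concern
    exit_code: int

def fact(result: Result, name: str) -> str:
    if result.exit_code != 0:
        raise RuntimeError(f"facter {name} failed (exit {result.exit_code})")
    # Normalize a possibly-None stdout before trimming, so the helper
    # never blows up the way `nil.chomp` would.
    return (result.stdout or "").rstrip("\n")

assert fact(Result("4.2.1\n", 0), "facterversion") == "4.2.1"
assert fact(Result(None, 0), "facterversion") == ""
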
@@ -84,10 +84,10 @@ public interface AutoRestValidationTest { * used by Retrofit to perform actually REST calls. */ interface AutoRestValidationTestService { - @GET("/fakepath/{subscriptionId}/{resourceGroupName}/{id}?api-version={apiVersion}") + @GET("/fakepath/{subscriptionId}/{resourceGroupName}/{id}") Call<ResponseBody> validationOfMethodParameters(@Path("subscriptionId") String subscriptionId, @Path("resourceGroupName") String resourceGroupName, @Path("id") int id, @Query("apiVersion") String apiVersion); - @PUT("/fakepath/{subscriptionId}/{resourceGroupName}/{id}?api-version={apiVersion}") + @PUT("/fakepath/{subscriptionId}/{resourceGroupName}/{id}") Call<ResponseBody> validationOfBody(@Path("subscriptionId") String subscriptionId, @Path("resourceGroupName") String resourceGroupName, @Path("id") int id, @Body Product body, @Query("apiVersion") String apiVersion); }
1
/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator 0.14.0.0 * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ package fixtures.validation; import java.util.List; import com.squareup.okhttp.Interceptor; import com.squareup.okhttp.logging.HttpLoggingInterceptor.Level; import com.microsoft.rest.ServiceCallback; import com.microsoft.rest.ServiceResponse; import com.squareup.okhttp.ResponseBody; import fixtures.validation.models.ErrorException; import fixtures.validation.models.Product; import java.io.IOException; import retrofit.Call; import retrofit.http.Body; import retrofit.http.GET; import retrofit.http.Path; import retrofit.http.PUT; import retrofit.http.Query; /** * The interface for AutoRestValidationTest class. */ public interface AutoRestValidationTest { /** * Gets the URI used as the base for all cloud service requests. * * @return the BaseUri value. */ String getBaseUri(); /** * Gets the list of interceptors the OkHttp client will execute. * * @return the list of interceptors. */ List<Interceptor> getClientInterceptors(); /** * Sets the logging level for OkHttp client. * * @param logLevel the logging level enum. */ void setLogLevel(Level logLevel); /** * Gets Subscription ID.. * * @return the subscriptionId value. */ String getSubscriptionId(); /** * Sets Subscription ID.. * * @param subscriptionId the subscriptionId value. */ void setSubscriptionId(String subscriptionId); /** * Gets Required string following pattern \d{2}-\d{2}-\d{4}. * * @return the apiVersion value. */ String getApiVersion(); /** * Sets Required string following pattern \d{2}-\d{2}-\d{4}. * * @param apiVersion the apiVersion value. */ void setApiVersion(String apiVersion); /** * The interface defining all the services for AutoRestValidationTest to be * used by Retrofit to perform actually REST calls. */ interface AutoRestValidationTestService { @GET("/fakepath/{subscriptionId}/{resourceGroupName}/{id}?api-version={apiVersion}") Call<ResponseBody> validationOfMethodParameters(@Path("subscriptionId") String subscriptionId, @Path("resourceGroupName") String resourceGroupName, @Path("id") int id, @Query("apiVersion") String apiVersion); @PUT("/fakepath/{subscriptionId}/{resourceGroupName}/{id}?api-version={apiVersion}") Call<ResponseBody> validationOfBody(@Path("subscriptionId") String subscriptionId, @Path("resourceGroupName") String resourceGroupName, @Path("id") int id, @Body Product body, @Query("apiVersion") String apiVersion); } /** * Validates input parameters on the method. See swagger for details. * * @param resourceGroupName Required string between 3 and 10 chars with pattern [a-zA-Z0-9]+. * @param id Required int multiple of 10 from 100 to 1000. * @throws ErrorException exception thrown from REST call * @throws IOException exception thrown from serialization/deserialization * @throws IllegalArgumentException exception thrown from invalid parameters * @return the Product object wrapped in {@link ServiceResponse} if successful. */ ServiceResponse<Product> validationOfMethodParameters(String resourceGroupName, int id) throws ErrorException, IOException, IllegalArgumentException; /** * Validates input parameters on the method. See swagger for details. * * @param resourceGroupName Required string between 3 and 10 chars with pattern [a-zA-Z0-9]+. * @param id Required int multiple of 10 from 100 to 1000. 
* @param serviceCallback the async ServiceCallback to handle successful and failed responses. * @return the {@link Call} object */ Call<ResponseBody> validationOfMethodParametersAsync(String resourceGroupName, int id, final ServiceCallback<Product> serviceCallback); /** * Validates body parameters on the method. See swagger for details. * * @param resourceGroupName Required string between 3 and 10 chars with pattern [a-zA-Z0-9]+. * @param id Required int multiple of 10 from 100 to 1000. * @param body the Product value * @throws ErrorException exception thrown from REST call * @throws IOException exception thrown from serialization/deserialization * @throws IllegalArgumentException exception thrown from invalid parameters * @return the Product object wrapped in {@link ServiceResponse} if successful. */ ServiceResponse<Product> validationOfBody(String resourceGroupName, int id, Product body) throws ErrorException, IOException, IllegalArgumentException; /** * Validates body parameters on the method. See swagger for details. * * @param resourceGroupName Required string between 3 and 10 chars with pattern [a-zA-Z0-9]+. * @param id Required int multiple of 10 from 100 to 1000. * @param body the Product value * @param serviceCallback the async ServiceCallback to handle successful and failed responses. * @return the {@link Call} object */ Call<ResponseBody> validationOfBodyAsync(String resourceGroupName, int id, Product body, final ServiceCallback<Product> serviceCallback); }
1
21481
Why is api-version now removed in the generated code?
Azure-autorest
java
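
On the question above: presumably the generator dropped ?api-version={apiVersion} from the path templates because Retrofit builds the query string from @Query arguments itself, so a literal query placeholder in the @GET/@PUT template would conflict with or duplicate what @Query("apiVersion") already appends. A sketch of the same path/query separation in Python with requests (the URL shape mirrors the generated interface above; the wire name api-version follows the original template):

import requests

def validation_of_method_parameters(base_uri, subscription_id,
                                    resource_group_name, id_, api_version):
    # Path segments are substituted into the template; the query string is
    # serialized separately by the client, mirroring @Path vs. @Query.
    url = f"{base_uri}/fakepath/{subscription_id}/{resource_group_name}/{id_}"
    return requests.get(url, params={"api-version": api_version})
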
@@ -0,0 +1,3 @@ +from mmdet.utils import Registry + +OPTIMIZERS = Registry('optimizer')
1
1
18601
We may register all built-in optimizers of PyTorch here to simplify the builder.
open-mmlab-mmdetection
py
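
The patch above only creates the registry; the reviewer suggests pre-registering PyTorch's built-in optimizers so the builder can look them up by name. A sketch of that suggestion, assuming mmdet's Registry of that era exposes register_module (callable directly on a class) and a get lookup:

import inspect

import torch.optim
from mmdet.utils import Registry

OPTIMIZERS = Registry('optimizer')

def register_torch_optimizers():
    # Register every concrete Optimizer subclass exported by torch.optim
    # under its class name, e.g. 'SGD' -> torch.optim.SGD.
    for _, obj in inspect.getmembers(torch.optim):
        if (inspect.isclass(obj) and issubclass(obj, torch.optim.Optimizer)
                and obj is not torch.optim.Optimizer):
            OPTIMIZERS.register_module(obj)

register_torch_optimizers()
sgd_cls = OPTIMIZERS.get('SGD')  # assumed lookup API
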
@@ -109,7 +109,8 @@ std::string BaseGenerator::WrapInNameSpace(const Namespace *ns, const std::string &name) const { if (CurrentNameSpace() == ns) return name; std::string qualified_name = qualifying_start_; - for (auto it = ns->components.begin(); it != ns->components.end(); ++it) + auto &components = ns->components; + for (auto it = components.begin(); it != components.end(); ++it) qualified_name += *it + qualifying_separator_; return qualified_name + name; }
1
/* * Copyright 2016 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flatbuffers/code_generators.h" #include <assert.h> #include "flatbuffers/base.h" #include "flatbuffers/util.h" #if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable : 4127) // C4127: conditional expression is constant #endif namespace flatbuffers { void CodeWriter::operator+=(std::string text) { while (true) { auto begin = text.find("{{"); if (begin == std::string::npos) { break; } auto end = text.find("}}"); if (end == std::string::npos || end < begin) { break; } // Write all the text before the first {{ into the stream. stream_.write(text.c_str(), begin); // The key is between the {{ and }}. const std::string key = text.substr(begin + 2, end - begin - 2); // Find the value associated with the key. If it exists, write the // value into the stream, otherwise write the key itself into the stream. auto iter = value_map_.find(key); if (iter != value_map_.end()) { const std::string &value = iter->second; stream_ << value; } else { assert(false && "could not find key"); stream_ << key; } // Update the text to everything after the }}. text = text.substr(end + 2); } if (!text.empty() && string_back(text) == '\\') { text.pop_back(); stream_ << text; } else { stream_ << text << std::endl; } } const char *BaseGenerator::FlatBuffersGeneratedWarning() { return "automatically generated by the FlatBuffers compiler," " do not modify"; } std::string BaseGenerator::NamespaceDir(const Parser &parser, const std::string &path, const Namespace &ns) { EnsureDirExists(path.c_str()); if (parser.opts.one_file) return path; std::string namespace_dir = path; // Either empty or ends in separator. auto &namespaces = ns.components; for (auto it = namespaces.begin(); it != namespaces.end(); ++it) { namespace_dir += *it + kPathSeparator; EnsureDirExists(namespace_dir.c_str()); } return namespace_dir; } std::string BaseGenerator::NamespaceDir(const Namespace &ns) const { return BaseGenerator::NamespaceDir(parser_, path_, ns); } std::string BaseGenerator::FullNamespace(const char *separator, const Namespace &ns) { std::string namespace_name; auto &namespaces = ns.components; for (auto it = namespaces.begin(); it != namespaces.end(); ++it) { if (namespace_name.length()) namespace_name += separator; namespace_name += *it; } return namespace_name; } std::string BaseGenerator::LastNamespacePart(const Namespace &ns) { if (!ns.components.empty()) return ns.components.back(); else return std::string(""); } // Ensure that a type is prefixed with its namespace whenever it is used // outside of its namespace. 
std::string BaseGenerator::WrapInNameSpace(const Namespace *ns, const std::string &name) const { if (CurrentNameSpace() == ns) return name; std::string qualified_name = qualifying_start_; for (auto it = ns->components.begin(); it != ns->components.end(); ++it) qualified_name += *it + qualifying_separator_; return qualified_name + name; } std::string BaseGenerator::WrapInNameSpace(const Definition &def) const { return WrapInNameSpace(def.defined_namespace, def.name); } std::string BaseGenerator::GetNameSpace(const Definition &def) const { const Namespace *ns = def.defined_namespace; if (CurrentNameSpace() == ns) return ""; std::string qualified_name = qualifying_start_; for (auto it = ns->components.begin(); it != ns->components.end(); ++it) { qualified_name += *it; if ((it + 1) != ns->components.end()) { qualified_name += qualifying_separator_; } } return qualified_name; } // Generate a documentation comment, if available. void GenComment(const std::vector<std::string> &dc, std::string *code_ptr, const CommentConfig *config, const char *prefix) { if (dc.begin() == dc.end()) { // Don't output empty comment blocks with 0 lines of comment content. return; } std::string &code = *code_ptr; if (config != nullptr && config->first_line != nullptr) { code += std::string(prefix) + std::string(config->first_line) + "\n"; } std::string line_prefix = std::string(prefix) + ((config != nullptr && config->content_line_prefix != nullptr) ? config->content_line_prefix : "///"); for (auto it = dc.begin(); it != dc.end(); ++it) { code += line_prefix + *it + "\n"; } if (config != nullptr && config->last_line != nullptr) { code += std::string(prefix) + std::string(config->last_line) + "\n"; } } } // namespace flatbuffers #if defined(_MSC_VER) # pragma warning(pop) #endif
1
13130
No need for this new variable.
google-flatbuffers
java
@@ -87,11 +87,11 @@ export function diff(dom, parentDom, newVNode, oldVNode, context, isSvg, excessD c._vnode = newVNode; // Invoke getDerivedStateFromProps - let s = c._nextState || c.state; + if (c._nextState==null) { + c._nextState = c.state; + } if (newType.getDerivedStateFromProps!=null) { - oldState = assign({}, c.state); - if (s===c.state) s = c._nextState = assign({}, s); - assign(s, newType.getDerivedStateFromProps(newVNode.props, s)); + c.setState(newType.getDerivedStateFromProps(newVNode.props, c._nextState)); } // Invoke pre-render lifecycle methods
1
import { EMPTY_OBJ, EMPTY_ARR } from '../constants'; import { Component, enqueueRender } from '../component'; import { coerceToVNode, Fragment } from '../create-element'; import { diffChildren } from './children'; import { diffProps } from './props'; import { assign, removeNode } from '../util'; import options from '../options'; /** * Diff two virtual nodes and apply proper changes to the DOM * @param {import('../internal').PreactElement | Text} dom The DOM element representing * the virtual nodes under diff * @param {import('../internal').PreactElement} parentDom The parent of the DOM element * @param {import('../internal').VNode | null} newVNode The new virtual node * @param {import('../internal').VNode | null} oldVNode The old virtual node * @param {object} context The current context object * @param {boolean} isSvg Whether or not this element is an SVG node * @param {Array<import('../internal').PreactElement>} excessDomChildren * @param {Array<import('../internal').Component>} mounts A list of newly * mounted components * @param {import('../internal').Component | null} ancestorComponent The direct * parent component */ export function diff(dom, parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent, force) { // If the previous type doesn't match the new type we drop the whole subtree if (oldVNode==null || newVNode==null || oldVNode.type!==newVNode.type) { if (oldVNode!=null) unmount(oldVNode, ancestorComponent); if (newVNode==null) return null; dom = null; oldVNode = EMPTY_OBJ; } if (options.diff) options.diff(newVNode); let c, p, isNew = false, oldProps, oldState, snapshot, newType = newVNode.type; /** @type {import('../internal').Component | null} */ let clearProcessingException; try { outer: if (oldVNode.type===Fragment || newType===Fragment) { diffChildren(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, c); if (newVNode._children.length) { dom = newVNode._children[0]._dom; newVNode._lastDomChild = newVNode._children[newVNode._children.length - 1]._dom; } } else if (typeof newType==='function') { // Necessary for createContext api. Setting this property will pass // the context value as `this.context` just for this component. let cxType = newType.contextType; let provider = cxType && context[cxType._id]; let cctx = cxType != null ? (provider ? 
provider.props.value : cxType._defaultValue) : context; // Get component and set it to `c` if (oldVNode._component) { c = newVNode._component = oldVNode._component; clearProcessingException = c._processingException; } else { isNew = true; // Instantiate the new component if (newType.prototype && newType.prototype.render) { newVNode._component = c = new newType(newVNode.props, cctx); // eslint-disable-line new-cap } else { newVNode._component = c = new Component(newVNode.props, cctx); c.constructor = newType; c.render = doRender; } c._ancestorComponent = ancestorComponent; if (provider) provider.sub(c); c.props = newVNode.props; if (!c.state) c.state = {}; c.context = cctx; c._context = context; c._dirty = true; c._renderCallbacks = []; } c._vnode = newVNode; // Invoke getDerivedStateFromProps let s = c._nextState || c.state; if (newType.getDerivedStateFromProps!=null) { oldState = assign({}, c.state); if (s===c.state) s = c._nextState = assign({}, s); assign(s, newType.getDerivedStateFromProps(newVNode.props, s)); } // Invoke pre-render lifecycle methods if (isNew) { if (newType.getDerivedStateFromProps==null && c.componentWillMount!=null) c.componentWillMount(); if (c.componentDidMount!=null) mounts.push(c); } else { if (newType.getDerivedStateFromProps==null && force==null && c.componentWillReceiveProps!=null) { c.componentWillReceiveProps(newVNode.props, cctx); s = c._nextState || c.state; } if (!force && c.shouldComponentUpdate!=null && c.shouldComponentUpdate(newVNode.props, s, cctx)===false) { c.props = newVNode.props; c.state = s; c._dirty = false; break outer; } if (c.componentWillUpdate!=null) { c.componentWillUpdate(newVNode.props, s, cctx); } } oldProps = c.props; if (!oldState) oldState = c.state; c.context = cctx; c.props = newVNode.props; c.state = s; if (options.render) options.render(newVNode); let prev = c._prevVNode; let vnode = c._prevVNode = coerceToVNode(c.render(c.props, c.state, c.context)); c._dirty = false; if (c.getChildContext!=null) { context = assign(assign({}, context), c.getChildContext()); } if (!isNew && c.getSnapshotBeforeUpdate!=null) { snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState); } c.base = dom = diff(dom, parentDom, vnode, prev, context, isSvg, excessDomChildren, mounts, c, null); if (vnode!=null) { // If this component returns a Fragment (or another component that // returns a Fragment), then _lastDomChild will be non-null, // informing `diffChildren` to diff this component's VNode like a Fragemnt newVNode._lastDomChild = vnode._lastDomChild; } c._parentDom = parentDom; if (newVNode.ref) applyRef(newVNode.ref, c, ancestorComponent); } else { dom = diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent); if (newVNode.ref && (oldVNode.ref !== newVNode.ref)) { applyRef(newVNode.ref, dom, ancestorComponent); } } newVNode._dom = dom; if (c!=null) { while (p=c._renderCallbacks.pop()) p.call(c); // Don't call componentDidUpdate on mount or when we bailed out via // `shouldComponentUpdate` if (!isNew && oldProps!=null && c.componentDidUpdate!=null) { c.componentDidUpdate(oldProps, oldState, snapshot); } } if (clearProcessingException) { c._processingException = null; } if (options.diffed) options.diffed(newVNode); } catch (e) { catchErrorInComponent(e, ancestorComponent); } return dom; } export function commitRoot(mounts, root) { let c; while ((c = mounts.pop())) { try { c.componentDidMount(); } catch (e) { catchErrorInComponent(e, c._ancestorComponent); } } if (options.commit) options.commit(root); 
} /** * Diff two virtual nodes representing DOM element * @param {import('../internal').PreactElement} dom The DOM element representing * the virtual nodes being diffed * @param {import('../internal').VNode} newVNode The new virtual node * @param {import('../internal').VNode} oldVNode The old virtual node * @param {object} context The current context object * @param {boolean} isSvg Whether or not this DOM node is an SVG node * @param {*} excessDomChildren * @param {Array<import('../internal').Component>} mounts An array of newly * mounted components * @param {import('../internal').Component} ancestorComponent The parent * component to the ones being diffed * @returns {import('../internal').PreactElement} */ function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent) { let d = dom; // Tracks entering and exiting SVG namespace when descending through the tree. isSvg = newVNode.type==='svg' || isSvg; if (dom==null && excessDomChildren!=null) { for (let i=0; i<excessDomChildren.length; i++) { const child = excessDomChildren[i]; if (child!=null && (newVNode.type===null ? child.nodeType===3 : child.localName===newVNode.type)) { dom = child; excessDomChildren[i] = null; break; } } } if (dom==null) { dom = newVNode.type===null ? document.createTextNode(newVNode.text) : isSvg ? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement(newVNode.type); // we created a new parent, so none of the previously attached children can be reused: excessDomChildren = null; } newVNode._dom = dom; if (newVNode.type===null) { if ((d===null || dom===d) && newVNode.text!==oldVNode.text) { dom.data = newVNode.text; } } else { if (excessDomChildren!=null && dom.childNodes!=null) { excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes); } if (newVNode!==oldVNode) { let oldProps = oldVNode.props; let newProps = newVNode.props; // if we're hydrating, use the element's attributes as its current props: if (oldProps==null) { oldProps = {}; if (excessDomChildren!=null) { let name; for (let i=0; i<dom.attributes.length; i++) { name = dom.attributes[i].name; oldProps[name=='class' && newProps.className ? 'className' : name] = dom.attributes[i].value; } } } let oldHtml = oldProps.dangerouslySetInnerHTML; let newHtml = newProps.dangerouslySetInnerHTML; if (newHtml || oldHtml) { // Avoid re-applying the same '__html' if it did not changed between re-render if (!newHtml || !oldHtml || newHtml.__html!=oldHtml.__html) { dom.innerHTML = newHtml && newHtml.__html || ''; } } if (newProps.multiple) { dom.multiple = newProps.multiple; } diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, ancestorComponent); diffProps(dom, newProps, oldProps, isSvg); } } return dom; } /** * Invoke or update a ref, depending on whether it is a function or object ref. * @param {object|function} [ref=null] * @param {any} [value] */ export function applyRef(ref, value, ancestorComponent) { try { if (typeof ref=='function') ref(value); else ref.current = value; } catch (e) { catchErrorInComponent(e, ancestorComponent); } } /** * Unmount a virtual node from the tree and apply DOM changes * @param {import('../internal').VNode} vnode The virtual node to unmount * @param {import('../internal').Component} ancestorComponent The parent * component to this virtual node * @param {boolean} skipRemove Flag that indicates that a parent node of the * current element is already detached from the DOM. 
*/ export function unmount(vnode, ancestorComponent, skipRemove) { let r; if (options.unmount) options.unmount(vnode); if (r = vnode.ref) { applyRef(r, null, ancestorComponent); } if (!skipRemove && vnode._lastDomChild==null && (skipRemove = ((r = vnode._dom)!=null))) removeNode(r); vnode._dom = vnode._lastDomChild = null; if ((r = vnode._component)!=null) { if (r.componentWillUnmount) { try { r.componentWillUnmount(); } catch (e) { catchErrorInComponent(e, ancestorComponent); } } r.base = r._parentDom = null; if (r = r._prevVNode) unmount(r, ancestorComponent, skipRemove); } else if (r = vnode._children) { for (let i = 0; i < r.length; i++) { unmount(r[i], ancestorComponent, skipRemove); } } } /** The `.render()` method for a PFC backing instance. */ function doRender(props, state, context) { return this.constructor(props, context); } /** * Find the closest error boundary to a thrown error and call it * @param {object} error The thrown value * @param {import('../internal').Component} component The first ancestor * component check for error boundary behaviors */ function catchErrorInComponent(error, component) { for (; component; component = component._ancestorComponent) { if (!component._processingException) { try { if (component.constructor.getDerivedStateFromError!=null) { component.setState(component.constructor.getDerivedStateFromError(error)); } else if (component.componentDidCatch!=null) { component.componentDidCatch(error); } else { continue; } return enqueueRender(component._processingException = component); } catch (e) { error = e; } } } throw error; }
1
12872
Doesn't this enqueue a double render, or is that safeguarded somehow?
preactjs-preact
js
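
On the double-render question above: Preact guards enqueueRender with the component's _dirty flag, so a setState issued while that component is already being diffed only merges into _nextState, and the flag is re-checked when the queue drains, skipping components whose render already ran. The Python below is a simplified stand-in for that guard, not Preact's code.

render_queue = []

class Component:
    def __init__(self):
        self._dirty = False
        self.state = {}
        self._next_state = None

    def set_state(self, update):
        # Merge into the pending state; only enqueue when the dirty flag
        # flips, so repeated setState calls queue at most one render.
        s = self._next_state if self._next_state is not None else dict(self.state)
        s.update(update)
        self._next_state = s
        if not self._dirty:
            self._dirty = True
            render_queue.append(self)

def process():
    # Drain-time recheck: components whose render already completed (and
    # reset the flag) are skipped even if they slipped into the queue.
    while render_queue:
        c = render_queue.pop()
        if c._dirty:
            c._dirty = False  # stand-in for the actual re-render

c = Component()
c._dirty = True                # the diff is mid-render for this component
c.set_state({"a": 1})          # merges state, does not enqueue again
assert render_queue == [] and c._next_state == {"a": 1}
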
@@ -878,7 +878,9 @@ func (a *Account) randomClient() *client { } var c *client for c = range a.clients { - break + if c.acc == a { + break + } } return c }
1
// Copyright 2018-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "bytes" "encoding/hex" "errors" "fmt" "io/ioutil" "math" "math/rand" "net/http" "net/textproto" "net/url" "reflect" "sort" "strconv" "strings" "sync" "time" "github.com/nats-io/jwt/v2" "github.com/nats-io/nkeys" "github.com/nats-io/nuid" ) // For backwards compatibility with NATS < 2.0, users who are not explicitly defined into an // account will be grouped in the default global account. const globalAccountName = DEFAULT_GLOBAL_ACCOUNT // Account are subject namespace definitions. By default no messages are shared between accounts. // You can share via Exports and Imports of Streams and Services. type Account struct { Name string Nkey string Issuer string claimJWT string updated time.Time mu sync.RWMutex sqmu sync.Mutex sl *Sublist ic *client isid uint64 etmr *time.Timer ctmr *time.Timer strack map[string]sconns nrclients int32 sysclients int32 nleafs int32 nrleafs int32 clients map[*client]struct{} rm map[string]int32 lqws map[string]int32 usersRevoked map[string]int64 actsRevoked map[string]int64 mappings []*mapping lleafs []*client imports importMap exports exportMap js *jsAccount jsLimits *JetStreamAccountLimits limits expired bool incomplete bool signingKeys map[string]jwt.Scope srv *Server // server this account is registered with (possibly nil) lds string // loop detection subject for leaf nodes siReply []byte // service reply prefix, will form wildcard subscription. prand *rand.Rand eventIds *nuid.NUID eventIdsMu sync.Mutex defaultPerms *Permissions } // Account based limits. type limits struct { mpay int32 msubs int32 mconns int32 mleafs int32 } // Used to track remote clients and leafnodes per remote server. type sconns struct { conns int32 leafs int32 } // Import stream mapping struct type streamImport struct { acc *Account from string to string tr *transform rtr *transform claim *jwt.Import usePub bool invalid bool } const ClientInfoHdr = "Nats-Request-Info" // Import service mapping struct type serviceImport struct { acc *Account claim *jwt.Import se *serviceExport sid []byte from string to string tr *transform ts int64 rt ServiceRespType latency *serviceLatency m1 *ServiceLatency rc *client usePub bool response bool invalid bool share bool tracking bool didDeliver bool trackingHdr http.Header // header from request } // This is used to record when we create a mapping for implicit service // imports. We use this to clean up entries that are not singletons when // we detect that interest is no longer present. The key to the map will // be the actual interest. We record the mapped subject and the account. type serviceRespEntry struct { acc *Account msub string } // ServiceRespType represents the types of service request response types. type ServiceRespType uint8 // Service response types. Defaults to a singleton. const ( Singleton ServiceRespType = iota Streamed Chunked ) // String helper. 
func (rt ServiceRespType) String() string { switch rt { case Singleton: return "Singleton" case Streamed: return "Streamed" case Chunked: return "Chunked" } return "Unknown ServiceResType" } // exportAuth holds configured approvals or boolean indicating an // auth token is required for import. type exportAuth struct { tokenReq bool approved map[string]*Account } // streamExport type streamExport struct { exportAuth } // serviceExport holds additional information for exported services. type serviceExport struct { exportAuth acc *Account respType ServiceRespType latency *serviceLatency rtmr *time.Timer respThresh time.Duration } // Used to track service latency. type serviceLatency struct { sampling int8 // percentage from 1-100 or 0 to indicate triggered by header subject string } // exportMap tracks the exported streams and services. type exportMap struct { streams map[string]*streamExport services map[string]*serviceExport responses map[string]*serviceImport } // importMap tracks the imported streams and services. // For services we will also track the response mappings as well. type importMap struct { streams []*streamImport services map[string]*serviceImport rrMap map[string][]*serviceRespEntry } // NewAccount creates a new unlimited account with the given name. func NewAccount(name string) *Account { a := &Account{ Name: name, limits: limits{-1, -1, -1, -1}, eventIds: nuid.New(), } return a } // Used to create shallow copies of accounts for transfer // from opts to real accounts in server struct. func (a *Account) shallowCopy() *Account { na := NewAccount(a.Name) na.Nkey = a.Nkey na.Issuer = a.Issuer if a.imports.streams != nil { na.imports.streams = make([]*streamImport, 0, len(a.imports.streams)) for _, v := range a.imports.streams { si := *v na.imports.streams = append(na.imports.streams, &si) } } if a.imports.services != nil { na.imports.services = make(map[string]*serviceImport) for k, v := range a.imports.services { si := *v na.imports.services[k] = &si } } if a.exports.streams != nil { na.exports.streams = make(map[string]*streamExport) for k, v := range a.exports.streams { if v != nil { se := *v na.exports.streams[k] = &se } else { na.exports.streams[k] = nil } } } if a.exports.services != nil { na.exports.services = make(map[string]*serviceExport) for k, v := range a.exports.services { if v != nil { se := *v na.exports.services[k] = &se } else { na.exports.services[k] = nil } } } // JetStream na.jsLimits = a.jsLimits return na } // nextEventID uses its own lock for better concurrency. func (a *Account) nextEventID() string { a.eventIdsMu.Lock() id := a.eventIds.Next() a.eventIdsMu.Unlock() return id } // Called to track a remote server and connections and leafnodes it // has for this account. func (a *Account) updateRemoteServer(m *AccountNumConns) []*client { a.mu.Lock() if a.strack == nil { a.strack = make(map[string]sconns) } // This does not depend on receiving all updates since each one is idempotent. // FIXME(dlc) - We should cleanup when these both go to zero. prev := a.strack[m.Server.ID] a.strack[m.Server.ID] = sconns{conns: int32(m.Conns), leafs: int32(m.LeafNodes)} a.nrclients += int32(m.Conns) - prev.conns a.nrleafs += int32(m.LeafNodes) - prev.leafs mtce := a.mconns != jwt.NoLimit && (len(a.clients)-int(a.sysclients)+int(a.nrclients) > int(a.mconns)) // If we are over here some have snuck in and we need to rebalance. // All others will probably be doing the same thing but better to be // conservative and bit harsh here. 
// Clients will reconnect if we over compensate.
	var clients []*client
	if mtce {
		clients = make([]*client, 0, len(a.clients))
		for c := range a.clients {
			clients = append(clients, c)
		}
		sort.Slice(clients, func(i, j int) bool {
			return clients[i].start.After(clients[j].start)
		})
		over := (len(a.clients) - int(a.sysclients) + int(a.nrclients)) - int(a.mconns)
		if over < len(clients) {
			clients = clients[:over]
		}
	}
	// Now check leafnodes.
	mtlce := a.mleafs != jwt.NoLimit && (a.nleafs+a.nrleafs > a.mleafs)
	if mtlce {
		// Take ones from the end.
		leafs := a.lleafs
		over := int(a.nleafs + a.nrleafs - a.mleafs)
		if over < len(leafs) {
			leafs = leafs[len(leafs)-over:]
		}
		clients = append(clients, leafs...)
	}
	a.mu.Unlock()

	// If we have exceeded our max clients this will be populated.
	return clients
}

// Removes tracking for a remote server that has shutdown.
func (a *Account) removeRemoteServer(sid string) {
	a.mu.Lock()
	if a.strack != nil {
		prev := a.strack[sid]
		delete(a.strack, sid)
		a.nrclients -= prev.conns
		a.nrleafs -= prev.leafs
	}
	a.mu.Unlock()
}

// When querying for subject interest this is the number of
// expected responses. We need to actually check that the entry
// has active connections.
func (a *Account) expectedRemoteResponses() (expected int32) {
	a.mu.RLock()
	for _, sc := range a.strack {
		if sc.conns > 0 || sc.leafs > 0 {
			expected++
		}
	}
	a.mu.RUnlock()
	return
}

// Clears eventing and tracking for this account.
func (a *Account) clearEventing() {
	a.mu.Lock()
	a.nrclients = 0
	// Now clear state
	clearTimer(&a.etmr)
	clearTimer(&a.ctmr)
	a.clients = nil
	a.strack = nil
	a.mu.Unlock()
}

// GetName will return the account's name.
func (a *Account) GetName() string {
	if a == nil {
		return "n/a"
	}
	a.mu.RLock()
	name := a.Name
	a.mu.RUnlock()
	return name
}

// NumConnections returns the active number of clients for this account for
// all known servers.
func (a *Account) NumConnections() int {
	a.mu.RLock()
	nc := len(a.clients) + int(a.nrclients)
	a.mu.RUnlock()
	return nc
}

// NumRemoteConnections returns the number of client or leaf connections that
// are not on this server.
func (a *Account) NumRemoteConnections() int {
	a.mu.RLock()
	nc := int(a.nrclients + a.nrleafs)
	a.mu.RUnlock()
	return nc
}

// NumLocalConnections returns the active number of clients for this account
// on this server.
func (a *Account) NumLocalConnections() int {
	a.mu.RLock()
	nlc := a.numLocalConnections()
	a.mu.RUnlock()
	return nlc
}

// Do not account for the system accounts.
func (a *Account) numLocalConnections() int {
	return len(a.clients) - int(a.sysclients) - int(a.nleafs)
}

// This is for extended local interest.
// Lock should not be held.
func (a *Account) numLocalAndLeafConnections() int {
	a.mu.RLock()
	nlc := len(a.clients) - int(a.sysclients)
	a.mu.RUnlock()
	return nlc
}

func (a *Account) numLocalLeafNodes() int {
	return int(a.nleafs)
}

// MaxTotalConnectionsReached returns if we have reached our limit for number of connections.
func (a *Account) MaxTotalConnectionsReached() bool {
	var mtce bool
	a.mu.RLock()
	if a.mconns != jwt.NoLimit {
		mtce = len(a.clients)-int(a.sysclients)+int(a.nrclients) >= int(a.mconns)
	}
	a.mu.RUnlock()
	return mtce
}

// MaxActiveConnections returns the set limit for the account system
// wide for total number of active connections.
func (a *Account) MaxActiveConnections() int {
	a.mu.RLock()
	mconns := int(a.mconns)
	a.mu.RUnlock()
	return mconns
}

// MaxTotalLeafNodesReached returns if we have reached our limit for number of leafnodes.
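// Both local leafnodes and those reported by remote servers count toward the limit.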
func (a *Account) MaxTotalLeafNodesReached() bool {
	a.mu.RLock()
	mtc := a.maxTotalLeafNodesReached()
	a.mu.RUnlock()
	return mtc
}

func (a *Account) maxTotalLeafNodesReached() bool {
	if a.mleafs != jwt.NoLimit {
		return a.nleafs+a.nrleafs >= a.mleafs
	}
	return false
}

// NumLeafNodes returns the active number of local and remote
// leaf node connections.
func (a *Account) NumLeafNodes() int {
	a.mu.RLock()
	nln := int(a.nleafs + a.nrleafs)
	a.mu.RUnlock()
	return nln
}

// NumRemoteLeafNodes returns the active number of remote
// leaf node connections.
func (a *Account) NumRemoteLeafNodes() int {
	a.mu.RLock()
	nrn := int(a.nrleafs)
	a.mu.RUnlock()
	return nrn
}

// MaxActiveLeafNodes returns the set limit for the account system
// wide for total number of leafnode connections.
// NOTE: these are tracked separately.
func (a *Account) MaxActiveLeafNodes() int {
	a.mu.RLock()
	mleafs := int(a.mleafs)
	a.mu.RUnlock()
	return mleafs
}

// RoutedSubs returns how many subjects we would send across a route when first
// connected or expressing interest. Local client subs.
func (a *Account) RoutedSubs() int {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return len(a.rm)
}

// TotalSubs returns total number of Subscriptions for this account.
func (a *Account) TotalSubs() int {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return int(a.sl.Count())
}

// MapDest is for mapping published subjects for clients.
type MapDest struct {
	Subject    string `json:"subject"`
	Weight     uint8  `json:"weight"`
	OptCluster string `json:"cluster,omitempty"`
}

func NewMapDest(subject string, weight uint8) *MapDest {
	return &MapDest{subject, weight, ""}
}

// destination is for internal representation for a weighted mapped destination.
type destination struct {
	tr     *transform
	weight uint8
}

// mapping is an internal entry for mapping subjects.
type mapping struct {
	src    string
	wc     bool
	dests  []*destination
	cdests map[string][]*destination
}

// AddMapping adds in a simple route mapping from src subject to dest subject
// for inbound client messages.
func (a *Account) AddMapping(src, dest string) error {
	return a.AddWeightedMappings(src, NewMapDest(dest, 100))
}

// AddWeightedMappings will add in weighted mappings for the destinations.
// TODO(dlc) - Allow cluster filtering
func (a *Account) AddWeightedMappings(src string, dests ...*MapDest) error {
	a.mu.Lock()
	defer a.mu.Unlock()

	// We use this for selecting between multiple weighted destinations.
	if a.prand == nil {
		a.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	if !IsValidSubject(src) {
		return ErrBadSubject
	}
	m := &mapping{src: src, wc: subjectHasWildcard(src), dests: make([]*destination, 0, len(dests)+1)}
	seen := make(map[string]struct{})

	var tw uint8
	for _, d := range dests {
		if _, ok := seen[d.Subject]; ok {
			return fmt.Errorf("duplicate entry for %q", d.Subject)
		}
		seen[d.Subject] = struct{}{}
		if d.Weight > 100 {
			return fmt.Errorf("individual weights need to be <= 100")
		}
		tw += d.Weight
		if tw > 100 {
			return fmt.Errorf("total weight needs to be <= 100")
		}
		if !IsValidSubject(d.Subject) {
			return ErrBadSubject
		}
		tr, err := newTransform(src, d.Subject)
		if err != nil {
			return err
		}
		if d.OptCluster == "" {
			m.dests = append(m.dests, &destination{tr, d.Weight})
		} else {
			// We have a cluster scoped filter.
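			// Entries are kept per cluster name and only consulted when this
			// server's cluster matches (see selectMappedSubject below).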
			if m.cdests == nil {
				m.cdests = make(map[string][]*destination)
			}
			ad := m.cdests[d.OptCluster]
			ad = append(ad, &destination{tr, d.Weight})
			m.cdests[d.OptCluster] = ad
		}
	}

	processDestinations := func(dests []*destination) ([]*destination, error) {
		var ltw uint8
		for _, d := range dests {
			ltw += d.weight
		}
		// Auto add in the original subject at the weight difference if the
		// entries' weights do not total 100, iff the src was not already
		// added in explicitly, meaning they want loss.
		_, haveSrc := seen[src]
		if ltw != 100 && !haveSrc {
			dest := src
			if m.wc {
				// We need to make the appropriate markers for the wildcards etc.
				dest = transformTokenize(dest)
			}
			tr, err := newTransform(src, dest)
			if err != nil {
				return nil, err
			}
			aw := 100 - ltw
			if len(dests) == 0 {
				aw = 100
			}
			dests = append(dests, &destination{tr, aw})
		}
		sort.Slice(dests, func(i, j int) bool { return dests[i].weight < dests[j].weight })

		var lw uint8
		for _, d := range dests {
			d.weight += lw
			lw = d.weight
		}
		return dests, nil
	}

	var err error
	if m.dests, err = processDestinations(m.dests); err != nil {
		return err
	}

	// Option cluster scoped destinations
	for cluster, dests := range m.cdests {
		if dests, err = processDestinations(dests); err != nil {
			return err
		}
		m.cdests[cluster] = dests
	}

	// Replace an old one if it exists. Note the loop variable must not
	// shadow the new mapping m, otherwise the old entry would be kept.
	for i, em := range a.mappings {
		if em.src == src {
			a.mappings[i] = m
			return nil
		}
	}
	// If we did not replace add to the end.
	a.mappings = append(a.mappings, m)
	return nil
}

// Helper function to tokenize subjects with partial wildcards into formal transform destinations.
// e.g. foo.*.* -> foo.$1.$2
func transformTokenize(subject string) string {
	// We need to make the appropriate markers for the wildcards etc.
	i := 1
	var nda []string
	for _, token := range strings.Split(subject, tsep) {
		if token == "*" {
			nda = append(nda, fmt.Sprintf("$%d", i))
			i++
		} else {
			nda = append(nda, token)
		}
	}
	return strings.Join(nda, tsep)
}

func transformUntokenize(subject string) (string, []string) {
	var phs []string
	var nda []string

	for _, token := range strings.Split(subject, tsep) {
		if len(token) > 1 && token[0] == '$' && token[1] >= '1' && token[1] <= '9' {
			phs = append(phs, token)
			nda = append(nda, "*")
		} else {
			nda = append(nda, token)
		}
	}
	return strings.Join(nda, tsep), phs
}

// RemoveMapping will remove an existing mapping.
func (a *Account) RemoveMapping(src string) bool {
	a.mu.Lock()
	defer a.mu.Unlock()
	for i, m := range a.mappings {
		if m.src == src {
			// Swap last one into this spot. It's ok to change order.
			a.mappings[i] = a.mappings[len(a.mappings)-1]
			a.mappings[len(a.mappings)-1] = nil // gc
			a.mappings = a.mappings[:len(a.mappings)-1]
			return true
		}
	}
	return false
}

// Indicates we have mapping entries.
func (a *Account) hasMappings() bool {
	if a == nil {
		return false
	}
	a.mu.RLock()
	n := len(a.mappings)
	a.mu.RUnlock()
	return n > 0
}

// This performs the logic to map to a new dest subject based on mappings.
// Should only be called from processInboundClientMsg or service import processing.
func (a *Account) selectMappedSubject(dest string) (string, bool) {
	a.mu.RLock()
	if len(a.mappings) == 0 {
		a.mu.RUnlock()
		return dest, false
	}

	// In case we have to tokenize for subset matching.
	tsa := [32]string{}
	tts := tsa[:0]

	var m *mapping
	for _, rm := range a.mappings {
		if !rm.wc && rm.src == dest {
			m = rm
			break
		} else {
			// tokenize and reuse for subset matching.
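			// e.g. "foo.bar.baz" becomes ["foo" "bar" "baz"]; the split is
			// done once and reused for every wildcard mapping we test.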
if len(tts) == 0 { start := 0 subject := dest for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) } if isSubsetMatch(tts, rm.src) { m = rm break } } } if m == nil { a.mu.RUnlock() return dest, false } // The selected destination for the mapping. var d *destination var ndest string dests := m.dests if len(m.cdests) > 0 { cn := a.srv.cachedClusterName() dests = m.cdests[cn] if dests == nil { // Fallback to main if we do not match the cluster. dests = m.dests } } // Optimize for single entry case. if len(dests) == 1 && dests[0].weight == 100 { d = dests[0] } else { w := uint8(a.prand.Int31n(100)) for _, rm := range dests { if w < rm.weight { d = rm break } } } if d != nil { if len(d.tr.dtpi) == 0 { ndest = d.tr.dest } else if nsubj, err := d.tr.transform(tts); err == nil { ndest = nsubj } } a.mu.RUnlock() return ndest, true } // SubscriptionInterest returns true if this account has a matching subscription // for the given `subject`. Works only for literal subjects. // TODO: Add support for wildcards func (a *Account) SubscriptionInterest(subject string) bool { return a.Interest(subject) > 0 } // Interest returns the number of subscriptions for a given subject that match. func (a *Account) Interest(subject string) int { var nms int a.mu.RLock() if a.sl != nil { res := a.sl.Match(subject) nms = len(res.psubs) + len(res.qsubs) } a.mu.RUnlock() return nms } // addClient keeps our accounting of local active clients or leafnodes updated. // Returns previous total. func (a *Account) addClient(c *client) int { a.mu.Lock() n := len(a.clients) if a.clients != nil { a.clients[c] = struct{}{} } added := n != len(a.clients) if added { if c.kind == SYSTEM { a.sysclients++ } else if c.kind == LEAF { a.nleafs++ a.lleafs = append(a.lleafs, c) } } a.mu.Unlock() if c != nil && c.srv != nil && added { c.srv.accConnsUpdate(a) } return n } // Helper function to remove leaf nodes. If number of leafnodes gets large // this may need to be optimized out of linear search but believe number // of active leafnodes per account scope to be small and therefore cache friendly. // Lock should be held on account. func (a *Account) removeLeafNode(c *client) { ll := len(a.lleafs) for i, l := range a.lleafs { if l == c { a.lleafs[i] = a.lleafs[ll-1] if ll == 1 { a.lleafs = nil } else { a.lleafs = a.lleafs[:ll-1] } return } } } // removeClient keeps our accounting of local active clients updated. func (a *Account) removeClient(c *client) int { a.mu.Lock() n := len(a.clients) delete(a.clients, c) removed := n != len(a.clients) if removed { if c.kind == SYSTEM { a.sysclients-- } else if c.kind == LEAF { a.nleafs-- a.removeLeafNode(c) } } a.mu.Unlock() if c != nil && c.srv != nil && removed { c.srv.mu.Lock() doRemove := a != c.srv.gacc c.srv.mu.Unlock() if doRemove { c.srv.accConnsUpdate(a) } } return n } func (a *Account) randomClient() *client { if a.ic != nil { return a.ic } var c *client for c = range a.clients { break } return c } // AddServiceExport will configure the account with the defined export. func (a *Account) AddServiceExport(subject string, accounts []*Account) error { return a.AddServiceExportWithResponse(subject, Singleton, accounts) } // AddServiceExportWithResponse will configure the account with the defined export and response type. 
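// Passing nil accounts makes the export public; a non-nil empty slice requires an activation token.
// Illustrative usage (hypothetical subject): acc.AddServiceExportWithResponse("req.help", Streamed, nil)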
func (a *Account) AddServiceExportWithResponse(subject string, respType ServiceRespType, accounts []*Account) error {
	if a == nil {
		return ErrMissingAccount
	}
	a.mu.Lock()
	defer a.mu.Unlock()

	if a.exports.services == nil {
		a.exports.services = make(map[string]*serviceExport)
	}

	se := a.exports.services[subject]
	// Always create a service export
	if se == nil {
		se = &serviceExport{}
	}

	if respType != Singleton {
		se.respType = respType
	}

	if accounts != nil {
		// empty means auth required but will be import token.
		if len(accounts) == 0 {
			se.tokenReq = true
		} else {
			if se.approved == nil {
				se.approved = make(map[string]*Account, len(accounts))
			}
			for _, acc := range accounts {
				se.approved[acc.Name] = acc
			}
		}
	}
	lrt := a.lowestServiceExportResponseTime()
	se.acc = a
	se.respThresh = DEFAULT_SERVICE_EXPORT_RESPONSE_THRESHOLD
	a.exports.services[subject] = se

	if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt {
		a.updateAllClientsServiceExportResponseTime(nlrt)
	}
	return nil
}

// TrackServiceExport will enable latency tracking of the named service.
// Results will be published in this account to the given results subject.
func (a *Account) TrackServiceExport(service, results string) error {
	return a.TrackServiceExportWithSampling(service, results, DEFAULT_SERVICE_LATENCY_SAMPLING)
}

// TrackServiceExportWithSampling will enable latency tracking of the named service for the given
// sampling rate (1-100). Results will be published in this account to the given results subject.
func (a *Account) TrackServiceExportWithSampling(service, results string, sampling int) error {
	if a == nil {
		return ErrMissingAccount
	}

	if sampling != 0 { // 0 means triggered by header
		if sampling < 1 || sampling > 100 {
			return ErrBadSampling
		}
	}
	if !IsValidPublishSubject(results) {
		return ErrBadPublishSubject
	}
	// Don't loop back on ourselves.
	if a.IsExportService(results) {
		return ErrBadPublishSubject
	}

	if a.srv != nil && !a.srv.EventsEnabled() {
		return ErrNoSysAccount
	}

	a.mu.Lock()
	if a.exports.services == nil {
		a.mu.Unlock()
		return ErrMissingService
	}
	ea, ok := a.exports.services[service]
	if !ok {
		a.mu.Unlock()
		return ErrMissingService
	}
	if ea == nil {
		ea = &serviceExport{}
		a.exports.services[service] = ea
	} else if ea.respType != Singleton {
		a.mu.Unlock()
		return ErrBadServiceType
	}
	ea.latency = &serviceLatency{
		sampling: int8(sampling),
		subject:  results,
	}
	s := a.srv
	a.mu.Unlock()

	if s == nil {
		return nil
	}

	// Now track down the imports and add in latency as needed to enable.
	s.accounts.Range(func(k, v interface{}) bool {
		acc := v.(*Account)
		acc.mu.Lock()
		for _, im := range acc.imports.services {
			if im != nil && im.acc.Name == a.Name && subjectIsSubsetMatch(im.to, service) {
				im.latency = ea.latency
			}
		}
		acc.mu.Unlock()
		return true
	})
	return nil
}

// UnTrackServiceExport will disable latency tracking of the named service.
func (a *Account) UnTrackServiceExport(service string) {
	if a == nil || (a.srv != nil && !a.srv.EventsEnabled()) {
		return
	}

	a.mu.Lock()
	if a == nil || a.exports.services == nil {
		a.mu.Unlock()
		return
	}
	ea, ok := a.exports.services[service]
	if !ok || ea == nil || ea.latency == nil {
		a.mu.Unlock()
		return
	}
	// We have latency here.
	ea.latency = nil
	s := a.srv
	a.mu.Unlock()

	if s == nil {
		return
	}

	// Now track down the imports and clean them up.
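	// Any account may have imported this service, so we must walk them all.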
s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name { if subjectIsSubsetMatch(im.to, service) { im.latency, im.m1 = nil, nil } } } acc.mu.Unlock() return true }) } // IsExportService will indicate if this service exists. Will check wildcard scenarios. func (a *Account) IsExportService(service string) bool { a.mu.RLock() defer a.mu.RUnlock() _, ok := a.exports.services[service] if ok { return true } tokens := strings.Split(service, tsep) for subj := range a.exports.services { if isSubsetMatch(tokens, subj) { return true } } return false } // IsExportServiceTracking will indicate if given publish subject is an export service with tracking enabled. func (a *Account) IsExportServiceTracking(service string) bool { a.mu.RLock() ea, ok := a.exports.services[service] if ok && ea == nil { a.mu.RUnlock() return false } if ok && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } // FIXME(dlc) - Might want to cache this is in the hot path checking for latency tracking. tokens := strings.Split(service, tsep) for subj, ea := range a.exports.services { if isSubsetMatch(tokens, subj) && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } } a.mu.RUnlock() return false } // ServiceLatency is the JSON message sent out in response to latency tracking for // an accounts exported services. Additional client info is available in requestor // and responder. Note that for a requestor, the only information shared by default // is the RTT used to calculate the total latency. The requestor's account can // designate to share the additional information in the service import. type ServiceLatency struct { TypedEvent Status int `json:"status"` Error string `json:"description,omitempty"` Requestor *ClientInfo `json:"requestor,omitempty"` Responder *ClientInfo `json:"responder,omitempty"` RequestHeader http.Header `json:"header,omitempty"` // only contains header(s) triggering the measurement RequestStart time.Time `json:"start"` ServiceLatency time.Duration `json:"service"` SystemLatency time.Duration `json:"system"` TotalLatency time.Duration `json:"total"` } // ServiceLatencyType is the NATS Event Type for ServiceLatency const ServiceLatencyType = "io.nats.server.metric.v1.service_latency" // NATSTotalTime is a helper function that totals the NATS latencies. func (nl *ServiceLatency) NATSTotalTime() time.Duration { return nl.Requestor.RTT + nl.Responder.RTT + nl.SystemLatency } // Merge function to merge m1 and m2 (requestor and responder) measurements // when there are two samples. This happens when the requestor and responder // are on different servers. // // m2 ServiceLatency is correct, so use that. // m1 TotalLatency is correct, so use that. // Will use those to back into NATS latency. func (m1 *ServiceLatency) merge(m2 *ServiceLatency) { m1.SystemLatency = m1.ServiceLatency - (m2.ServiceLatency + m2.Responder.RTT) m1.ServiceLatency = m2.ServiceLatency m1.Responder = m2.Responder sanitizeLatencyMetric(m1) } // sanitizeLatencyMetric adjusts latency metric values that could go // negative in some edge conditions since we estimate client RTT // for both requestor and responder. // These numbers are never meant to be negative, it just could be // how we back into the values based on estimated RTT. 
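// Clamping at zero trades a small bias for metrics that are always sensible.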
func sanitizeLatencyMetric(sl *ServiceLatency) { if sl.ServiceLatency < 0 { sl.ServiceLatency = 0 } if sl.SystemLatency < 0 { sl.SystemLatency = 0 } } // Used for transporting remote latency measurements. type remoteLatency struct { Account string `json:"account"` ReqId string `json:"req_id"` M2 ServiceLatency `json:"m2"` respThresh time.Duration } // sendLatencyResult will send a latency result and clear the si of the requestor(rc). func (a *Account) sendLatencyResult(si *serviceImport, sl *ServiceLatency) { sl.Type = ServiceLatencyType sl.ID = a.nextEventID() sl.Time = time.Now().UTC() a.mu.Lock() lsubj := si.latency.subject si.rc = nil a.mu.Unlock() a.srv.sendInternalAccountMsg(a, lsubj, sl) } // Used to send a bad request metric when we do not have a reply subject func (a *Account) sendBadRequestTrackingLatency(si *serviceImport, requestor *client, header http.Header) { sl := &ServiceLatency{ Status: 400, Error: "Bad Request", Requestor: requestor.getClientInfo(si.share), } sl.RequestHeader = header sl.RequestStart = time.Now().Add(-sl.Requestor.RTT).UTC() a.sendLatencyResult(si, sl) } // Used to send a latency result when the requestor interest was lost before the // response could be delivered. func (a *Account) sendReplyInterestLostTrackLatency(si *serviceImport) { sl := &ServiceLatency{ Status: 408, Error: "Request Timeout", } a.mu.RLock() rc := si.rc share := si.share ts := si.ts sl.RequestHeader = si.trackingHdr a.mu.RUnlock() if rc != nil { sl.Requestor = rc.getClientInfo(share) } sl.RequestStart = time.Unix(0, ts-int64(sl.Requestor.RTT)).UTC() a.sendLatencyResult(si, sl) } func (a *Account) sendBackendErrorTrackingLatency(si *serviceImport, reason rsiReason) { sl := &ServiceLatency{} a.mu.RLock() rc := si.rc share := si.share ts := si.ts sl.RequestHeader = si.trackingHdr a.mu.RUnlock() if rc != nil { sl.Requestor = rc.getClientInfo(share) } var reqRTT time.Duration if sl.Requestor != nil { reqRTT = sl.Requestor.RTT } sl.RequestStart = time.Unix(0, ts-int64(reqRTT)).UTC() if reason == rsiNoDelivery { sl.Status = 503 sl.Error = "Service Unavailable" } else if reason == rsiTimeout { sl.Status = 504 sl.Error = "Service Timeout" } a.sendLatencyResult(si, sl) } // sendTrackingMessage will send out the appropriate tracking information for the // service request/response latency. This is called when the requestor's server has // received the response. // TODO(dlc) - holding locks for RTTs may be too much long term. Should revisit. func (a *Account) sendTrackingLatency(si *serviceImport, responder *client) bool { if si.rc == nil { return true } ts := time.Now() serviceRTT := time.Duration(ts.UnixNano() - si.ts) requestor := si.rc sl := &ServiceLatency{ Status: 200, Requestor: requestor.getClientInfo(si.share), Responder: responder.getClientInfo(true), } var respRTT, reqRTT time.Duration if sl.Responder != nil { respRTT = sl.Responder.RTT } if sl.Requestor != nil { reqRTT = sl.Requestor.RTT } sl.RequestStart = time.Unix(0, si.ts-int64(reqRTT)).UTC() sl.ServiceLatency = serviceRTT - respRTT sl.TotalLatency = sl.Requestor.RTT + serviceRTT if respRTT > 0 { sl.SystemLatency = time.Since(ts) sl.TotalLatency += sl.SystemLatency } sl.RequestHeader = si.trackingHdr sanitizeLatencyMetric(sl) sl.Type = ServiceLatencyType sl.ID = a.nextEventID() sl.Time = time.Now().UTC() // If we are expecting a remote measurement, store our sl here. // We need to account for the race between this and us receiving the // remote measurement. 
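	// Whichever side records its half second performs the merge and publishes the result.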
// FIXME(dlc) - We need to clean these up but this should happen // already with the auto-expire logic. if responder != nil && responder.kind != CLIENT { si.acc.mu.Lock() if si.m1 != nil { m1, m2 := sl, si.m1 m1.merge(m2) si.acc.mu.Unlock() a.srv.sendInternalAccountMsg(a, si.latency.subject, m1) a.mu.Lock() si.rc = nil a.mu.Unlock() return true } si.m1 = sl si.acc.mu.Unlock() return false } else { a.srv.sendInternalAccountMsg(a, si.latency.subject, sl) a.mu.Lock() si.rc = nil a.mu.Unlock() } return true } // This will check to make sure our response lower threshold is set // properly in any clients doing rrTracking. // Lock should be held. func (a *Account) updateAllClientsServiceExportResponseTime(lrt time.Duration) { for c := range a.clients { c.mu.Lock() if c.rrTracking != nil && lrt != c.rrTracking.lrt { c.rrTracking.lrt = lrt if c.rrTracking.ptmr.Stop() { c.rrTracking.ptmr.Reset(lrt) } } c.mu.Unlock() } } // Will select the lowest respThresh from all service exports. // Read lock should be held. func (a *Account) lowestServiceExportResponseTime() time.Duration { // Lowest we will allow is 5 minutes. Its an upper bound for this function. lrt := time.Duration(5 * time.Minute) for _, se := range a.exports.services { if se.respThresh < lrt { lrt = se.respThresh } } return lrt } // AddServiceImportWithClaim will add in the service import via the jwt claim. func (a *Account) AddServiceImportWithClaim(destination *Account, from, to string, imClaim *jwt.Import) error { if destination == nil { return ErrMissingAccount } // Empty means use from. if to == "" { to = from } if !IsValidSubject(from) || !IsValidSubject(to) { return ErrInvalidSubject } // First check to see if the account has authorized us to route to the "to" subject. if !destination.checkServiceImportAuthorized(a, to, imClaim) { return ErrServiceImportAuthorization } // Check if this introduces a cycle before proceeding. if err := a.serviceImportFormsCycle(destination, from); err != nil { return err } _, err := a.addServiceImport(destination, from, to, imClaim) return err } const MaxAccountCycleSearchDepth = 1024 func (a *Account) serviceImportFormsCycle(dest *Account, from string) error { return dest.checkServiceImportsForCycles(from, map[string]bool{a.Name: true}) } func (a *Account) checkServiceImportsForCycles(from string, visited map[string]bool) error { if len(visited) >= MaxAccountCycleSearchDepth { return ErrCycleSearchDepth } a.mu.RLock() for _, si := range a.imports.services { if SubjectsCollide(from, si.to) { a.mu.RUnlock() if visited[si.acc.Name] { return ErrImportFormsCycle } // Push ourselves and check si.acc visited[a.Name] = true if subjectIsSubsetMatch(si.from, from) { from = si.from } if err := si.acc.checkServiceImportsForCycles(from, visited); err != nil { return err } a.mu.RLock() } } a.mu.RUnlock() return nil } func (a *Account) streamImportFormsCycle(dest *Account, to string) error { return dest.checkStreamImportsForCycles(to, map[string]bool{a.Name: true}) } // Lock should be held. 
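// A linear scan is acceptable here since accounts typically have few stream exports.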
func (a *Account) hasStreamExportMatching(to string) bool { for subj := range a.exports.streams { if subjectIsSubsetMatch(to, subj) { return true } } return false } func (a *Account) checkStreamImportsForCycles(to string, visited map[string]bool) error { if len(visited) >= MaxAccountCycleSearchDepth { return ErrCycleSearchDepth } a.mu.RLock() if !a.hasStreamExportMatching(to) { a.mu.RUnlock() return nil } for _, si := range a.imports.streams { if SubjectsCollide(to, si.to) { a.mu.RUnlock() if visited[si.acc.Name] { return ErrImportFormsCycle } // Push ourselves and check si.acc visited[a.Name] = true if subjectIsSubsetMatch(si.to, to) { to = si.to } if err := si.acc.checkStreamImportsForCycles(to, visited); err != nil { return err } a.mu.RLock() } } a.mu.RUnlock() return nil } // SetServiceImportSharing will allow sharing of information about requests with the export account. // Used for service latency tracking at the moment. func (a *Account) SetServiceImportSharing(destination *Account, to string, allow bool) error { a.mu.Lock() defer a.mu.Unlock() if a.isClaimAccount() { return fmt.Errorf("claim based accounts can not be updated directly") } for _, si := range a.imports.services { if si.acc == destination && si.to == to { si.share = allow return nil } } return fmt.Errorf("service import not found") } // AddServiceImport will add a route to an account to send published messages / requests // to the destination account. From is the local subject to map, To is the // subject that will appear on the destination account. Destination will need // to have an import rule to allow access via addService. func (a *Account) AddServiceImport(destination *Account, from, to string) error { return a.AddServiceImportWithClaim(destination, from, to, nil) } // NumPendingReverseResponses returns the number of response mappings we have for all outstanding // requests for service imports. func (a *Account) NumPendingReverseResponses() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.rrMap) } // NumPendingAllResponses return the number of all responses outstanding for service exports. func (a *Account) NumPendingAllResponses() int { return a.NumPendingResponses("") } // NumResponsesPending returns the number of responses outstanding for service exports // on this account. An empty filter string returns all responses regardless of which export. // If you specify the filter we will only return ones that are for that export. // NOTE this is only for what this server is tracking. func (a *Account) NumPendingResponses(filter string) int { a.mu.RLock() defer a.mu.RUnlock() if filter == "" { return len(a.exports.responses) } se := a.getServiceExport(filter) if se == nil { return 0 } var nre int for _, si := range a.exports.responses { if si.se == se { nre++ } } return nre } // NumServiceImports returns the number of service imports we have configured. func (a *Account) NumServiceImports() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.services) } // Reason why we are removing this response serviceImport. type rsiReason int const ( rsiOk = rsiReason(iota) rsiNoDelivery rsiTimeout ) // removeRespServiceImport removes a response si mapping and the reverse entries for interest detection. 
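// The reason distinguishes delivery failures from timeouts so tracked requests can report 503 vs 504.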
func (a *Account) removeRespServiceImport(si *serviceImport, reason rsiReason) { if si == nil { return } a.mu.Lock() delete(a.exports.responses, si.from) dest := si.acc to := si.to tracking := si.tracking rc := si.rc a.mu.Unlock() if tracking && rc != nil { a.sendBackendErrorTrackingLatency(si, reason) } dest.checkForReverseEntry(to, si, false) } // removeServiceImport will remove the route by subject. func (a *Account) removeServiceImport(subject string) { a.mu.Lock() si, ok := a.imports.services[subject] delete(a.imports.services, subject) var sid []byte c := a.ic if ok && si != nil { if a.ic != nil && si.sid != nil { sid = si.sid } } a.mu.Unlock() if sid != nil { c.processUnsub(sid) } } // This tracks responses to service requests mappings. This is used for cleanup. func (a *Account) addReverseRespMapEntry(acc *Account, reply, from string) { a.mu.Lock() if a.imports.rrMap == nil { a.imports.rrMap = make(map[string][]*serviceRespEntry) } sre := &serviceRespEntry{acc, from} sra := a.imports.rrMap[reply] a.imports.rrMap[reply] = append(sra, sre) a.mu.Unlock() } // checkForReverseEntries is for when we are trying to match reverse entries to a wildcard. // This will be called from checkForReverseEntry when the reply arg is a wildcard subject. // This will usually be called in a go routine since we need to walk all the entries. func (a *Account) checkForReverseEntries(reply string, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectIsLiteral(reply) { a.mu.RUnlock() a.checkForReverseEntry(reply, nil, checkInterest) return } var _rs [32]string rs := _rs[:0] for k := range a.imports.rrMap { if subjectIsSubsetMatch(k, reply) { rs = append(rs, k) } } a.mu.RUnlock() for _, reply := range rs { a.checkForReverseEntry(reply, nil, checkInterest) } } // This checks for any response map entries. If you specify an si we will only match and // clean up for that one, otherwise we remove them all. func (a *Account) checkForReverseEntry(reply string, si *serviceImport, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectHasWildcard(reply) { a.mu.RUnlock() go a.checkForReverseEntries(reply, checkInterest) return } sres := a.imports.rrMap[reply] if sres == nil { a.mu.RUnlock() return } // If we are here we have an entry we should check. // If requested we will first check if there is any // interest for this subject for the entire account. // If there is we can not delete any entries yet. // Note that if we are here reply has to be a literal subject. if checkInterest { // If interest still exists we can not clean these up yet. if rr := a.sl.Match(reply); len(rr.psubs)+len(rr.qsubs) > 0 { a.mu.RUnlock() return } } a.mu.RUnlock() // Delete the appropriate entries here based on optional si. a.mu.Lock() if si == nil { delete(a.imports.rrMap, reply) } else { // Find the one we are looking for.. for i, sre := range sres { if sre.msub == si.from { sres = append(sres[:i], sres[i+1:]...) break } } if len(sres) > 0 { a.imports.rrMap[si.to] = sres } else { delete(a.imports.rrMap, si.to) } } a.mu.Unlock() // If we are here we no longer have interest and we have // response entries that we should clean up. 
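	// Note this sweep only runs on the wholesale path where no specific si was given.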
if si == nil { for _, sre := range sres { acc := sre.acc var trackingCleanup bool var rsi *serviceImport acc.mu.Lock() if rsi = acc.exports.responses[sre.msub]; rsi != nil && !rsi.didDeliver { delete(acc.exports.responses, rsi.from) trackingCleanup = rsi.tracking && rsi.rc != nil } acc.mu.Unlock() if trackingCleanup { acc.sendReplyInterestLostTrackLatency(rsi) } } } } // Internal check to see if a service import exists. func (a *Account) serviceImportExists(dest *Account, from string) bool { a.mu.RLock() dup := a.imports.services[from] a.mu.RUnlock() return dup != nil } // Add a service import. // This does no checks and should only be called by the msg processing code. // Use AddServiceImport from above if responding to user input or config changes, etc. func (a *Account) addServiceImport(dest *Account, from, to string, claim *jwt.Import) (*serviceImport, error) { rt := Singleton var lat *serviceLatency dest.mu.RLock() se := dest.getServiceExport(to) if se != nil { rt = se.respType lat = se.latency } s := dest.srv dest.mu.RUnlock() // Track if this maps us to the system account. // We will always share information with them. var isSysAcc bool if s != nil { s.mu.Lock() if s.sys != nil && dest == s.sys.account { isSysAcc = true } s.mu.Unlock() } a.mu.Lock() if a.imports.services == nil { a.imports.services = make(map[string]*serviceImport) } else if dup := a.imports.services[from]; dup != nil { a.mu.Unlock() return nil, fmt.Errorf("duplicate service import subject %q, previously used in import for account %q, subject %q", from, dup.acc.Name, dup.to) } if to == "" { to = from } // Check to see if we have a wildcard var ( usePub bool tr *transform err error ) if subjectHasWildcard(to) { // If to and from match, then we use the published subject. if to == from { usePub = true } else { to, _ = transformUntokenize(to) // Create a transform. Do so in reverse such that $ symbols only exist in to if tr, err = newTransform(to, transformTokenize(from)); err != nil { a.mu.Unlock() return nil, fmt.Errorf("failed to create mapping transform for service import subject %q to %q: %v", from, to, err) } else { // un-tokenize and reverse transform so we get the transform needed from, _ = transformUntokenize(from) tr = tr.reverse() } } } // Turn on sharing by default if importing from system services. share := isSysAcc if claim != nil { share = claim.Share } si := &serviceImport{dest, claim, se, nil, from, to, tr, 0, rt, lat, nil, nil, usePub, false, false, share, false, false, nil} a.imports.services[from] = si a.mu.Unlock() if err := a.addServiceImportSub(si); err != nil { a.removeServiceImport(si.from) return nil, err } return si, nil } // Returns the internal client, will create one if not present. // Lock should be held. func (a *Account) internalClient() *client { if a.ic == nil && a.srv != nil { a.ic = a.srv.createInternalAccountClient() a.ic.acc = a } return a.ic } // Internal account scoped subscriptions. func (a *Account) subscribeInternal(subject string, cb msgHandler) (*subscription, error) { a.mu.Lock() c := a.internalClient() a.isid++ sid := strconv.FormatUint(a.isid, 10) a.mu.Unlock() // This will happen in parsing when the account has not been properly setup. if c == nil { return nil, fmt.Errorf("no internal account client") } return c.processSub([]byte(subject), nil, []byte(sid), cb, false) } // This will add an account subscription that matches the "from" from a service import entry. 
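// The subscription lives on the account's internal client rather than any user connection.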
func (a *Account) addServiceImportSub(si *serviceImport) error { a.mu.Lock() c := a.internalClient() // This will happen in parsing when the account has not been properly setup. if c == nil { a.mu.Unlock() return nil } if si.sid != nil { a.mu.Unlock() return fmt.Errorf("duplicate call to create subscription for service import") } a.isid++ sid := strconv.FormatUint(a.isid, 10) si.sid = []byte(sid) subject := si.from a.mu.Unlock() cb := func(sub *subscription, c *client, subject, reply string, msg []byte) { c.processServiceImport(si, a, msg) } _, err := c.processSub([]byte(subject), nil, []byte(sid), cb, true) return err } // Remove all the subscriptions associated with service imports. func (a *Account) removeAllServiceImportSubs() { a.mu.RLock() var sids [][]byte for _, si := range a.imports.services { if si.sid != nil { sids = append(sids, si.sid) si.sid = nil } } c := a.ic a.ic = nil a.mu.RUnlock() if c == nil { return } for _, sid := range sids { c.processUnsub(sid) } c.closeConnection(InternalClient) } // Add in subscriptions for all registered service imports. func (a *Account) addAllServiceImportSubs() { for _, si := range a.imports.services { a.addServiceImportSub(si) } } var ( // header where all information is encoded in one value. trcUber = textproto.CanonicalMIMEHeaderKey("Uber-Trace-Id") trcCtx = textproto.CanonicalMIMEHeaderKey("Traceparent") trcB3 = textproto.CanonicalMIMEHeaderKey("B3") // openzipkin header to check trcB3Sm = textproto.CanonicalMIMEHeaderKey("X-B3-Sampled") trcB3Id = textproto.CanonicalMIMEHeaderKey("X-B3-TraceId") // additional header needed to include when present trcB3PSId = textproto.CanonicalMIMEHeaderKey("X-B3-ParentSpanId") trcB3SId = textproto.CanonicalMIMEHeaderKey("X-B3-SpanId") trcCtxSt = textproto.CanonicalMIMEHeaderKey("Tracestate") trcUberCtxPrefix = textproto.CanonicalMIMEHeaderKey("Uberctx-") ) func newB3Header(h http.Header) http.Header { retHdr := http.Header{} if v, ok := h[trcB3Sm]; ok { retHdr[trcB3Sm] = v } if v, ok := h[trcB3Id]; ok { retHdr[trcB3Id] = v } if v, ok := h[trcB3PSId]; ok { retHdr[trcB3PSId] = v } if v, ok := h[trcB3SId]; ok { retHdr[trcB3SId] = v } return retHdr } func newUberHeader(h http.Header, tId []string) http.Header { retHdr := http.Header{trcUber: tId} for k, v := range h { if strings.HasPrefix(k, trcUberCtxPrefix) { retHdr[k] = v } } return retHdr } func newTraceCtxHeader(h http.Header, tId []string) http.Header { retHdr := http.Header{trcCtx: tId} if v, ok := h[trcCtxSt]; ok { retHdr[trcCtxSt] = v } return retHdr } // Helper to determine when to sample. 
When header has a value, sampling is driven by header func shouldSample(l *serviceLatency, c *client) (bool, http.Header) { if l == nil { return false, nil } if l.sampling < 0 { return false, nil } if l.sampling >= 100 { return true, nil } if l.sampling > 0 && rand.Int31n(100) <= int32(l.sampling) { return true, nil } h := c.parseState.getHeader() if len(h) == 0 { return false, nil } if tId := h[trcUber]; len(tId) != 0 { // sample 479fefe9525eddb:5adb976bfc1f95c1:479fefe9525eddb:1 tk := strings.Split(tId[0], ":") if len(tk) == 4 && len(tk[3]) > 0 && len(tk[3]) <= 2 { dst := [2]byte{} src := [2]byte{'0', tk[3][0]} if len(tk[3]) == 2 { src[1] = tk[3][1] } if _, err := hex.Decode(dst[:], src[:]); err == nil && dst[0]&1 == 1 { return true, newUberHeader(h, tId) } } return false, nil } else if sampled := h[trcB3Sm]; len(sampled) != 0 && sampled[0] == "1" { return true, newB3Header(h) // allowed } else if len(sampled) != 0 && sampled[0] == "0" { return false, nil // denied } else if _, ok := h[trcB3Id]; ok { // sample 80f198ee56343ba864fe8b2a57d3eff7 // presence (with X-B3-Sampled not being 0) means sampling left to recipient return true, newB3Header(h) } else if b3 := h[trcB3]; len(b3) != 0 { // sample 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1-05e3ac9a4f6e3b90 // sample 0 tk := strings.Split(b3[0], "-") if len(tk) > 2 && tk[2] == "0" { return false, nil // denied } else if len(tk) == 1 && tk[0] == "0" { return false, nil // denied } return true, http.Header{trcB3: b3} // sampling allowed or left to recipient of header } else if tId := h[trcCtx]; len(tId) != 0 { // sample 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01 tk := strings.Split(tId[0], "-") if len(tk) == 4 && len([]byte(tk[3])) == 2 && tk[3] == "01" { return true, newTraceCtxHeader(h, tId) } else { return false, nil } } return false, nil } // Used to mimic client like replies. const ( replyPrefix = "_R_." trackSuffix = ".T" replyPrefixLen = len(replyPrefix) baseServerLen = 10 replyLen = 6 minReplyLen = 15 digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 ) // This is where all service export responses are handled. func (a *Account) processServiceImportResponse(sub *subscription, c *client, subject, reply string, msg []byte) { a.mu.RLock() if a.expired || len(a.exports.responses) == 0 { a.mu.RUnlock() return } si := a.exports.responses[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() // Send for normal processing. c.processServiceImport(si, a, msg) } // Will create a wildcard subscription to handle interest graph propagation for all // service replies. // Lock should not be held. func (a *Account) createRespWildcard() []byte { a.mu.Lock() if a.prand == nil { a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } var b = [baseServerLen]byte{'_', 'R', '_', '.'} rn := a.prand.Int63() for i, l := replyPrefixLen, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } a.siReply = append(b[:], '.') pre := a.siReply wcsub := append(a.siReply, '>') c := a.internalClient() a.isid++ sid := strconv.FormatUint(a.isid, 10) a.mu.Unlock() // Create subscription and internal callback for all the wildcard response subjects. c.processSub(wcsub, nil, []byte(sid), a.processServiceImportResponse, false) return pre } // Test whether this is a tracked reply. func isTrackedReply(reply []byte) bool { lreply := len(reply) - 1 return lreply > 3 && reply[lreply-1] == '.' && reply[lreply] == 'T' } // Generate a new service reply from the wildcard prefix. 
// FIXME(dlc) - probably do not have to use rand here. about 25ns per.
func (a *Account) newServiceReply(tracking bool) []byte {
	a.mu.RLock()
	replyPre := a.siReply
	s := a.srv
	a.mu.RUnlock()

	if replyPre == nil {
		replyPre = a.createRespWildcard()
	}

	var b [replyLen]byte
	rn := a.prand.Int63()
	for i, l := 0, rn; i < len(b); i++ {
		b[i] = digits[l%base]
		l /= base
	}
	// Make sure to copy.
	reply := make([]byte, 0, len(replyPre)+len(b))
	reply = append(reply, replyPre...)
	reply = append(reply, b[:]...)

	if tracking && s.sys != nil {
		// Add in our tracking identifier. This allows the metrics to get back to only
		// this server without needless SUBS/UNSUBS.
		reply = append(reply, '.')
		reply = append(reply, s.sys.shash...)
		reply = append(reply, '.', 'T')
	}

	return reply
}

// Checks if a serviceImport was created to map responses.
func (si *serviceImport) isRespServiceImport() bool {
	return si != nil && si.response
}

// Sets the response threshold timer for a service export.
// Account lock should be held
func (se *serviceExport) setResponseThresholdTimer() {
	if se.rtmr != nil {
		return // Already set
	}
	se.rtmr = time.AfterFunc(se.respThresh, se.checkExpiredResponses)
}

// Account lock should be held
func (se *serviceExport) clearResponseThresholdTimer() bool {
	if se.rtmr == nil {
		return true
	}
	stopped := se.rtmr.Stop()
	se.rtmr = nil
	return stopped
}

// checkExpiredResponses will check for any pending responses that need to
// be cleaned up.
func (se *serviceExport) checkExpiredResponses() {
	acc := se.acc
	if acc == nil {
		// Without an account there is no lock to take; just clear the timer.
		se.clearResponseThresholdTimer()
		return
	}

	var expired []*serviceImport
	mints := time.Now().UnixNano() - int64(se.respThresh)

	// TODO(dlc) - Should we release lock while doing this? Or only do these in batches?
	// Should we break this up for responses only from this service export?
	// Responses live on acc directly for fast inbound processing for the _R_ wildcard.
	// We could do another indirection at this level but just to get to the service export?
	var totalResponses int
	acc.mu.RLock()
	for _, si := range acc.exports.responses {
		if si.se == se {
			totalResponses++
			if si.ts <= mints {
				expired = append(expired, si)
			}
		}
	}
	acc.mu.RUnlock()

	for _, si := range expired {
		acc.removeRespServiceImport(si, rsiTimeout)
	}

	// Pull out expired to determine if we have any left for timer.
	totalResponses -= len(expired)

	// Redo timer as needed.
	acc.mu.Lock()
	if totalResponses > 0 && se.rtmr != nil {
		se.rtmr.Stop()
		se.rtmr.Reset(se.respThresh)
	} else {
		se.clearResponseThresholdTimer()
	}
	acc.mu.Unlock()
}

// ServiceExportResponseThreshold returns the current threshold.
func (a *Account) ServiceExportResponseThreshold(export string) (time.Duration, error) {
	a.mu.Lock()
	defer a.mu.Unlock()
	se := a.getServiceExport(export)
	if se == nil {
		return 0, fmt.Errorf("no export defined for %q", export)
	}
	return se.respThresh, nil
}

// SetServiceExportResponseThreshold sets the maximum time the system will wait for
// a response to be delivered from a service export responder.
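// Responses still pending after this window are expired and, when tracked, surface as timeouts.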
func (a *Account) SetServiceExportResponseThreshold(export string, maxTime time.Duration) error { a.mu.Lock() defer a.mu.Unlock() if a.isClaimAccount() { return fmt.Errorf("claim based accounts can not be updated directly") } lrt := a.lowestServiceExportResponseTime() se := a.getServiceExport(export) if se == nil { return fmt.Errorf("no export defined for %q", export) } se.respThresh = maxTime if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt { a.updateAllClientsServiceExportResponseTime(nlrt) } return nil } // This is for internal service import responses. func (a *Account) addRespServiceImport(dest *Account, to string, osi *serviceImport, tracking bool, header http.Header) *serviceImport { nrr := string(osi.acc.newServiceReply(tracking)) a.mu.Lock() rt := osi.rt // dest is the requestor's account. a is the service responder with the export. // Marked as internal here, that is how we distinguish. si := &serviceImport{dest, nil, osi.se, nil, nrr, to, nil, 0, rt, nil, nil, nil, false, true, false, osi.share, false, false, nil} if a.exports.responses == nil { a.exports.responses = make(map[string]*serviceImport) } a.exports.responses[nrr] = si // Always grab time and make sure response threshold timer is running. si.ts = time.Now().UnixNano() osi.se.setResponseThresholdTimer() if rt == Singleton && tracking { si.latency = osi.latency si.tracking = true si.trackingHdr = header } a.mu.Unlock() // We do not do individual subscriptions here like we do on configured imports. // We have an internal callback for all responses inbound to this account and // will process appropriately there. This does not pollute the sublist and the caches. // We do add in the reverse map such that we can detect loss of interest and do proper // cleanup of this si as interest goes away. dest.addReverseRespMapEntry(a, to, nrr) return si } // AddStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddStreamImportWithClaim(account *Account, from, prefix string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } // Check prefix if it exists and make sure its a literal. // Append token separator if not already present. if prefix != "" { // Make sure there are no wildcards here, this prefix needs to be a literal // since it will be prepended to a publish subject. if !subjectIsLiteral(prefix) { return ErrStreamImportBadPrefix } if prefix[len(prefix)-1] != btsep { prefix = prefix + string(btsep) } } return a.AddMappedStreamImportWithClaim(account, from, prefix+from, imClaim) } // AddMappedStreamImport helper for AddMappedStreamImportWithClaim func (a *Account) AddMappedStreamImport(account *Account, from, to string) error { return a.AddMappedStreamImportWithClaim(account, from, to, nil) } // AddMappedStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddMappedStreamImportWithClaim(account *Account, from, to string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } if to == "" { to = from } // Check if this forms a cycle. 
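	// Two accounts importing overlapping subject space from each other would bounce messages forever.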
	if err := a.streamImportFormsCycle(account, to); err != nil {
		return err
	}

	var (
		usePub bool
		tr     *transform
		err    error
	)
	if subjectHasWildcard(from) {
		if to == from {
			usePub = true
		} else {
			// Create a transform
			if tr, err = newTransform(from, transformTokenize(to)); err != nil {
				return fmt.Errorf("failed to create mapping transform for stream import subject %q to %q: %v",
					from, to, err)
			}
			to, _ = transformUntokenize(to)
		}
	}

	a.mu.Lock()
	if a.isStreamImportDuplicate(account, from) {
		a.mu.Unlock()
		return ErrStreamImportDuplicate
	}
	a.imports.streams = append(a.imports.streams, &streamImport{account, from, to, tr, nil, imClaim, usePub, false})
	a.mu.Unlock()
	return nil
}

// isStreamImportDuplicate checks for duplicate.
// Lock should be held.
func (a *Account) isStreamImportDuplicate(acc *Account, from string) bool {
	for _, si := range a.imports.streams {
		if si.acc == acc && si.from == from {
			return true
		}
	}
	return false
}

// AddStreamImport will add in the stream import from a specific account.
func (a *Account) AddStreamImport(account *Account, from, prefix string) error {
	return a.AddStreamImportWithClaim(account, from, prefix, nil)
}

// IsPublicExport is a placeholder to denote a public export.
var IsPublicExport = []*Account(nil)

// AddStreamExport will add an export to the account. If accounts is nil
// it will signify a public export, meaning anyone can import.
func (a *Account) AddStreamExport(subject string, accounts []*Account) error {
	if a == nil {
		return ErrMissingAccount
	}
	a.mu.Lock()
	defer a.mu.Unlock()

	if a.exports.streams == nil {
		a.exports.streams = make(map[string]*streamExport)
	}
	ea := a.exports.streams[subject]
	if accounts != nil {
		if ea == nil {
			ea = &streamExport{}
		}
		// empty means auth required but will be import token.
		if len(accounts) == 0 {
			ea.tokenReq = true
		} else {
			if ea.approved == nil {
				ea.approved = make(map[string]*Account, len(accounts))
			}
			for _, acc := range accounts {
				ea.approved[acc.Name] = acc
			}
		}
	}
	a.exports.streams[subject] = ea
	return nil
}

// Check if another account is authorized to import from us.
func (a *Account) checkStreamImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool {
	// Find the subject in the exports list.
	a.mu.RLock()
	auth := a.checkStreamImportAuthorizedNoLock(account, subject, imClaim)
	a.mu.RUnlock()
	return auth
}

func (a *Account) checkStreamImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool {
	if a.exports.streams == nil || !IsValidSubject(subject) {
		return false
	}
	return a.checkStreamExportApproved(account, subject, imClaim)
}

func (a *Account) checkAuth(ea *exportAuth, account *Account, imClaim *jwt.Import) bool {
	// if ea is nil or ea.approved is nil, that denotes a public export
	if ea == nil || (ea.approved == nil && !ea.tokenReq) {
		return true
	}
	// Check if token required
	if ea.tokenReq {
		return a.checkActivation(account, imClaim, true)
	}
	// If we have a matching account we are authorized
	_, ok := ea.approved[account.Name]
	return ok
}

func (a *Account) checkStreamExportApproved(account *Account, subject string, imClaim *jwt.Import) bool {
	// Check direct match of subject first
	ea, ok := a.exports.streams[subject]
	if ok {
		if ea == nil {
			return true
		}
		return a.checkAuth(&ea.exportAuth, account, imClaim)
	}
	// ok if we are here we did not match directly so we need to test each one.
	// The import subject arg has to take precedence, meaning the export
	// has to be a true subset of the import claim. We already checked for
	// exact matches above.
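	// e.g. a subject of "foo.bar" is approved by an export of "foo.*" or "foo.>".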
tokens := strings.Split(subject, tsep) for subj, ea := range a.exports.streams { if isSubsetMatch(tokens, subj) { if ea == nil { return true } return a.checkAuth(&ea.exportAuth, account, imClaim) } } return false } func (a *Account) checkServiceExportApproved(account *Account, subject string, imClaim *jwt.Import) bool { // Check direct match of subject first se, ok := a.exports.services[subject] if ok { // if se is nil or eq.approved is nil, that denotes a public export if se == nil || (se.approved == nil && !se.tokenReq) { return true } // Check if token required if se.tokenReq { return a.checkActivation(account, imClaim, true) } // If we have a matching account we are authorized _, ok := se.approved[account.Name] return ok } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { if se == nil || (se.approved == nil && !se.tokenReq) { return true } // Check if token required if se.tokenReq { return a.checkActivation(account, imClaim, true) } _, ok := se.approved[account.Name] return ok } } return false } // Helper function to get a serviceExport. // Lock should be held on entry. func (a *Account) getServiceExport(subj string) *serviceExport { se, ok := a.exports.services[subj] // The export probably has a wildcard, so lookup that up. if !ok { se = a.getWildcardServiceExport(subj) } return se } // This helper is used when trying to match a serviceExport record that is // represented by a wildcard. // Lock should be held on entry. func (a *Account) getWildcardServiceExport(from string) *serviceExport { tokens := strings.Split(from, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { return se } } return nil } // Will fetch the activation token for an import. func fetchActivation(url string) string { // FIXME(dlc) - Make configurable. c := &http.Client{Timeout: 2 * time.Second} resp, err := c.Get(url) if err != nil || resp == nil { return "" } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "" } return string(body) } // These are import stream specific versions for when an activation expires. func (a *Account) streamActivationExpired(exportAcc *Account, subject string) { a.mu.RLock() if a.expired || a.imports.streams == nil { a.mu.RUnlock() return } var si *streamImport for _, si = range a.imports.streams { if si.acc == exportAcc && si.from == subject { break } } if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. return } a.mu.Lock() si.invalid = true clients := make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } awcsti := map[string]struct{}{a.Name: {}} a.mu.Unlock() for _, c := range clients { c.processSubsOnConfigReload(awcsti) } } // These are import service specific versions for when an activation expires. func (a *Account) serviceActivationExpired(subject string) { a.mu.RLock() if a.expired || a.imports.services == nil { a.mu.RUnlock() return } si := a.imports.services[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. 
return } a.mu.Lock() si.invalid = true a.mu.Unlock() } // Fires for expired activation tokens. We could track this with timers etc. // Instead we just re-analyze where we are and if we need to act. func (a *Account) activationExpired(exportAcc *Account, subject string, kind jwt.ExportType) { switch kind { case jwt.Stream: a.streamActivationExpired(exportAcc, subject) case jwt.Service: a.serviceActivationExpired(subject) } } func isRevoked(revocations map[string]int64, subject string, issuedAt int64) bool { if revocations == nil { return false } if t, ok := revocations[subject]; !ok || t < issuedAt { return false } return true } // checkActivation will check the activation token for validity. func (a *Account) checkActivation(importAcc *Account, claim *jwt.Import, expTimer bool) bool { if claim == nil || claim.Token == "" { return false } // Create a quick clone so we can inline Token JWT. clone := *claim // We grab the token from a URL by hand here since we need expiration etc. if url, err := url.Parse(clone.Token); err == nil && url.Scheme != "" { clone.Token = fetchActivation(url.String()) } vr := jwt.CreateValidationResults() clone.Validate(importAcc.Name, vr) if vr.IsBlocking(true) { return false } act, err := jwt.DecodeActivationClaims(clone.Token) if err != nil { return false } if !a.isIssuerClaimTrusted(act) { return false } vr = jwt.CreateValidationResults() act.Validate(vr) if vr.IsBlocking(true) { return false } if act.Expires != 0 { tn := time.Now().Unix() if act.Expires <= tn { return false } if expTimer { expiresAt := time.Duration(act.Expires - tn) time.AfterFunc(expiresAt*time.Second, func() { importAcc.activationExpired(a, string(act.ImportSubject), claim.Type) }) } } // Check for token revocation.. return !isRevoked(a.actsRevoked, act.Subject, act.IssuedAt) } // Returns true if the activation claim is trusted. That is the issuer matches // the account or is an entry in the signing keys. func (a *Account) isIssuerClaimTrusted(claims *jwt.ActivationClaims) bool { // if no issuer account, issuer is the account if claims.IssuerAccount == "" { return true } // If the IssuerAccount is not us, then this is considered an error. if a.Name != claims.IssuerAccount { if a.srv != nil { a.srv.Errorf("Invalid issuer account %q in activation claim (subject: %q - type: %q) for account %q", claims.IssuerAccount, claims.Activation.ImportSubject, claims.Activation.ImportType, a.Name) } return false } _, ok := a.hasIssuerNoLock(claims.Issuer) return ok } // Returns true if `a` and `b` stream imports are the same. Note that the // check is done with the account's name, not the pointer. This is used // during config reload where we are comparing current and new config // in which pointers are different. // No lock is acquired in this function, so it is assumed that the // import maps are not changed while this executes. func (a *Account) checkStreamImportsEqual(b *Account) bool { if len(a.imports.streams) != len(b.imports.streams) { return false } // Load the b imports into a map index by what we are looking for. 
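// Keyed by account name + from + to, which uniquely identifies a stream import.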
bm := make(map[string]*streamImport, len(b.imports.streams)) for _, bim := range b.imports.streams { bm[bim.acc.Name+bim.from+bim.to] = bim } for _, aim := range a.imports.streams { if _, ok := bm[aim.acc.Name+aim.from+aim.to]; !ok { return false } } return true } func (a *Account) checkStreamExportsEqual(b *Account) bool { if len(a.exports.streams) != len(b.exports.streams) { return false } for subj, aea := range a.exports.streams { bea, ok := b.exports.streams[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } func (a *Account) checkServiceExportsEqual(b *Account) bool { if len(a.exports.services) != len(b.exports.services) { return false } for subj, aea := range a.exports.services { bea, ok := b.exports.services[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } // Check if another account is authorized to route requests to this service. func (a *Account) checkServiceImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { a.mu.RLock() authorized := a.checkServiceImportAuthorizedNoLock(account, subject, imClaim) a.mu.RUnlock() return authorized } // Check if another account is authorized to route requests to this service. func (a *Account) checkServiceImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the services list. if a.exports.services == nil { return false } return a.checkServiceExportApproved(account, subject, imClaim) } // IsExpired returns expiration status. func (a *Account) IsExpired() bool { a.mu.RLock() exp := a.expired a.mu.RUnlock() return exp } // Called when an account has expired. func (a *Account) expiredTimeout() { // Mark expired first. a.mu.Lock() a.expired = true a.mu.Unlock() // Collect the clients and expire them. cs := make([]*client, 0, len(a.clients)) a.mu.RLock() for c := range a.clients { cs = append(cs, c) } a.mu.RUnlock() for _, c := range cs { c.accountAuthExpired() } } // Sets the expiration timer for an account JWT that has it set. func (a *Account) setExpirationTimer(d time.Duration) { a.etmr = time.AfterFunc(d, a.expiredTimeout) } // Lock should be held func (a *Account) clearExpirationTimer() bool { if a.etmr == nil { return true } stopped := a.etmr.Stop() a.etmr = nil return stopped } // checkUserRevoked will check if a user has been revoked. func (a *Account) checkUserRevoked(nkey string, issuedAt int64) bool { a.mu.RLock() defer a.mu.RUnlock() return isRevoked(a.usersRevoked, nkey, issuedAt) } // Check expiration and set the proper state as needed. func (a *Account) checkExpiration(claims *jwt.ClaimsData) { a.mu.Lock() defer a.mu.Unlock() a.clearExpirationTimer() if claims.Expires == 0 { a.expired = false return } tn := time.Now().Unix() if claims.Expires <= tn { a.expired = true return } expiresAt := time.Duration(claims.Expires - tn) a.setExpirationTimer(expiresAt * time.Second) a.expired = false } // hasIssuer returns true if the issuer matches the account // If the issuer is a scoped signing key, the scope will be returned as well // issuer or it is a signing key for the account. 
func (a *Account) hasIssuer(issuer string) (jwt.Scope, bool) { a.mu.RLock() scope, ok := a.hasIssuerNoLock(issuer) a.mu.RUnlock() return scope, ok } // hasIssuerNoLock is the unlocked version of hasIssuer func (a *Account) hasIssuerNoLock(issuer string) (jwt.Scope, bool) { // same issuer -- keep this for safety on the calling code if a.Name == issuer { return nil, true } scope, ok := a.signingKeys[issuer] return scope, ok } // Returns the loop detection subject used for leafnodes func (a *Account) getLDSubject() string { a.mu.RLock() lds := a.lds a.mu.RUnlock() return lds } // Placeholder for signaling token auth required. var tokenAuthReq = []*Account{} func authAccounts(tokenReq bool) []*Account { if tokenReq { return tokenAuthReq } return nil } // SetAccountResolver will assign the account resolver. func (s *Server) SetAccountResolver(ar AccountResolver) { s.mu.Lock() s.accResolver = ar s.mu.Unlock() } // AccountResolver returns the registered account resolver. func (s *Server) AccountResolver() AccountResolver { s.mu.Lock() ar := s.accResolver s.mu.Unlock() return ar } // isClaimAccount returns if this account is backed by a JWT claim. // Lock should be held. func (a *Account) isClaimAccount() bool { return a.claimJWT != "" } // updateAccountClaims will update an existing account with new claims. // This will replace any exports or imports previously defined. // Lock MUST NOT be held upon entry. func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) { s.updateAccountClaimsWithRefresh(a, ac, true) } // updateAccountClaimsWithRefresh will update an existing account with new claims. // If refreshImportingAccounts is true it will also update incomplete dependent accounts // This will replace any exports or imports previously defined. // Lock MUST NOT be held upon entry. func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaims, refreshImportingAccounts bool) { if a == nil { return } s.Debugf("Updating account claims: %s/%s", a.Name, ac.Name) a.checkExpiration(ac.Claims()) a.mu.Lock() // Clone to update, only select certain fields. old := &Account{Name: a.Name, exports: a.exports, limits: a.limits, signingKeys: a.signingKeys} // Reset exports and imports here. // Exports is creating a whole new map. a.exports = exportMap{} // Imports are checked unlocked in processInbound, so we can't change out the struct here. Need to process inline. if a.imports.streams != nil { old.imports.streams = a.imports.streams a.imports.streams = nil } if a.imports.services != nil { old.imports.services = make(map[string]*serviceImport, len(a.imports.services)) } for k, v := range a.imports.services { old.imports.services[k] = v delete(a.imports.services, k) } // Reset any notion of export revocations. 
a.actsRevoked = nil alteredScope := map[string]struct{}{} // update account signing keys a.signingKeys = nil if len(ac.SigningKeys) > 0 { a.signingKeys = make(map[string]jwt.Scope, len(ac.SigningKeys)) } signersChanged := false for k, scope := range ac.SigningKeys { a.signingKeys[k] = scope } if len(a.signingKeys) != len(old.signingKeys) { signersChanged = true } for k, scope := range a.signingKeys { if oldScope, ok := old.signingKeys[k]; !ok { signersChanged = true } else if !reflect.DeepEqual(scope, oldScope) { signersChanged = true alteredScope[k] = struct{}{} } } a.mu.Unlock() gatherClients := func() []*client { a.mu.RLock() clients := make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } a.mu.RUnlock() return clients } jsEnabled := s.JetStreamEnabled() if jsEnabled && a == s.SystemAccount() { for _, export := range allJsExports { s.Debugf("Adding jetstream service export %q for %s", export, a.Name) if err := a.AddServiceExport(export, nil); err != nil { s.Errorf("Error setting up jetstream service exports: %v", err) } } } for _, e := range ac.Exports { switch e.Type { case jwt.Stream: s.Debugf("Adding stream export %q for %s", e.Subject, a.Name) if err := a.AddStreamExport(string(e.Subject), authAccounts(e.TokenReq)); err != nil { s.Debugf("Error adding stream export to account [%s]: %v", a.Name, err.Error()) } case jwt.Service: s.Debugf("Adding service export %q for %s", e.Subject, a.Name) rt := Singleton switch e.ResponseType { case jwt.ResponseTypeStream: rt = Streamed case jwt.ResponseTypeChunked: rt = Chunked } if err := a.AddServiceExportWithResponse(string(e.Subject), rt, authAccounts(e.TokenReq)); err != nil { s.Debugf("Error adding service export to account [%s]: %v", a.Name, err) continue } sub := string(e.Subject) if e.Latency != nil { if err := a.TrackServiceExportWithSampling(sub, string(e.Latency.Results), int(e.Latency.Sampling)); err != nil { hdrNote := "" if e.Latency.Sampling == jwt.Headers { hdrNote = " (using headers)" } s.Debugf("Error adding latency tracking%s for service export to account [%s]: %v", hdrNote, a.Name, err) } } if e.ResponseThreshold != 0 { // Response threshold was set in options. if err := a.SetServiceExportResponseThreshold(sub, e.ResponseThreshold); err != nil { s.Debugf("Error adding service export response threshold for [%s]: %v", a.Name, err) } } } // We will track these at the account level. Should not have any collisions. 
if e.Revocations != nil { a.mu.Lock() if a.actsRevoked == nil { a.actsRevoked = make(map[string]int64) } for k, t := range e.Revocations { a.actsRevoked[k] = t } a.mu.Unlock() } } var incompleteImports []*jwt.Import for _, i := range ac.Imports { // check tmpAccounts with priority var acc *Account var err error if v, ok := s.tmpAccounts.Load(i.Account); ok { acc = v.(*Account) } else { acc, err = s.lookupAccount(i.Account) } if acc == nil || err != nil { s.Errorf("Can't locate account [%s] for import of [%v] %s (err=%v)", i.Account, i.Subject, i.Type, err) incompleteImports = append(incompleteImports, i) continue } from := string(i.Subject) to := i.GetTo() switch i.Type { case jwt.Stream: if i.LocalSubject != _EMPTY_ { // set local subject implies to is empty to = string(i.LocalSubject) s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, from, a.Name, to) err = a.AddMappedStreamImportWithClaim(acc, from, to, i) } else { s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, from, a.Name, to) err = a.AddStreamImportWithClaim(acc, from, to, i) } if err != nil { s.Debugf("Error adding stream import to account [%s]: %v", a.Name, err.Error()) incompleteImports = append(incompleteImports, i) } case jwt.Service: if i.LocalSubject != _EMPTY_ { from = string(i.LocalSubject) to = string(i.Subject) } s.Debugf("Adding service import %s:%q for %s:%q", acc.Name, from, a.Name, to) if err := a.AddServiceImportWithClaim(acc, from, to, i); err != nil { s.Debugf("Error adding service import to account [%s]: %v", a.Name, err.Error()) incompleteImports = append(incompleteImports, i) } } } // Now let's apply any needed changes from import/export changes. if !a.checkStreamImportsEqual(old) { awcsti := map[string]struct{}{a.Name: {}} for _, c := range gatherClients() { c.processSubsOnConfigReload(awcsti) } } // Now check if stream exports have changed. if !a.checkStreamExportsEqual(old) || signersChanged { clients := map[*client]struct{}{} // We need to check all accounts that have an import claim from this account. awcsti := map[string]struct{}{} s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) // Move to the next if this account is actually account "a". if acc.Name == a.Name { return true } // TODO: checkStreamImportAuthorized() stack should not be trying // to lock "acc". If we find that to be needed, we will need to // rework this to ensure we don't lock acc. acc.mu.Lock() for _, im := range acc.imports.streams { if im != nil && im.acc.Name == a.Name { // Check for if we are still authorized for an import. im.invalid = !a.checkStreamImportAuthorized(acc, im.from, im.claim) awcsti[acc.Name] = struct{}{} for c := range acc.clients { clients[c] = struct{}{} } } } acc.mu.Unlock() return true }) // Now walk clients. for c := range clients { c.processSubsOnConfigReload(awcsti) } } // Now check if service exports have changed. if !a.checkServiceExportsEqual(old) || signersChanged { s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) // Move to the next if this account is actually account "a". if acc.Name == a.Name { return true } // TODO: checkServiceImportAuthorized() stack should not be trying // to lock "acc". If we find that to be needed, we will need to // rework this to ensure we don't lock acc. acc.mu.Lock() for _, si := range acc.imports.services { if si != nil && si.acc.Name == a.Name { // Check for if we are still authorized for an import. 
si.invalid = !a.checkServiceImportAuthorized(acc, si.to, si.claim) if si.latency != nil && !si.response { // Make sure we should still be tracking latency. if se := a.getServiceExport(si.to); se != nil { si.latency = se.latency } } } } acc.mu.Unlock() return true }) } // Now make sure we shutdown the old service import subscriptions. var sids [][]byte a.mu.RLock() c := a.ic for _, si := range old.imports.services { if c != nil && si.sid != nil { sids = append(sids, si.sid) } } a.mu.RUnlock() for _, sid := range sids { c.processUnsub(sid) } // Now do limits if they are present. a.mu.Lock() a.msubs = int32(ac.Limits.Subs) a.mpay = int32(ac.Limits.Payload) a.mconns = int32(ac.Limits.Conn) a.mleafs = int32(ac.Limits.LeafNodeConn) // Check for any revocations if len(ac.Revocations) > 0 { // We will always replace whatever we had with most current, so no // need to look at what we have. a.usersRevoked = make(map[string]int64, len(ac.Revocations)) for pk, t := range ac.Revocations { a.usersRevoked[pk] = t } } else { a.usersRevoked = nil } a.defaultPerms = buildPermissionsFromJwt(&ac.DefaultPermissions) a.incomplete = len(incompleteImports) != 0 for _, i := range incompleteImports { s.incompleteAccExporterMap.Store(i.Account, struct{}{}) } if a.srv == nil { a.srv = s } if jsEnabled { if ac.Limits.JetStreamLimits.DiskStorage != 0 || ac.Limits.JetStreamLimits.MemoryStorage != 0 { // JetStreamAccountLimits and jwt.JetStreamLimits use same value for unlimited a.jsLimits = &JetStreamAccountLimits{ MaxMemory: ac.Limits.JetStreamLimits.MemoryStorage, MaxStore: ac.Limits.JetStreamLimits.DiskStorage, MaxStreams: int(ac.Limits.JetStreamLimits.Streams), MaxConsumers: int(ac.Limits.JetStreamLimits.Consumer), } } else if a.jsLimits != nil { // covers failed update followed by disable a.jsLimits = nil } } a.updated = time.Now() a.mu.Unlock() clients := gatherClients() // Sort if we are over the limit. if a.MaxTotalConnectionsReached() { sort.Slice(clients, func(i, j int) bool { return clients[i].start.After(clients[j].start) }) } if jsEnabled { if err := s.configJetStream(a); err != nil { s.Errorf("Error configuring jetstream for account [%s]: %v", a.Name, err.Error()) a.mu.Lock() // Absent reload of js server cfg, this is going to be broken until js is disabled a.incomplete = true a.mu.Unlock() } } for i, c := range clients { a.mu.RLock() exceeded := a.mconns != jwt.NoLimit && i >= int(a.mconns) a.mu.RUnlock() if exceeded { c.maxAccountConnExceeded() continue } c.mu.Lock() c.applyAccountLimits() theJWT := c.opts.JWT c.mu.Unlock() // Check for being revoked here. We use ac one to avoid the account lock. 
if ac.Revocations != nil && theJWT != "" { if juc, err := jwt.DecodeUserClaims(theJWT); err != nil { c.Debugf("User JWT not valid: %v", err) c.authViolation() continue } else if ok := ac.IsClaimRevoked(juc); ok { c.sendErrAndDebug("User Authentication Revoked") c.closeConnection(Revocation) continue } } } // Check if the signing keys changed, might have to evict if signersChanged { for _, c := range clients { c.mu.Lock() sk := c.user.SigningKey c.mu.Unlock() if sk == _EMPTY_ { continue } if _, ok := alteredScope[sk]; ok { c.closeConnection(AuthenticationViolation) } else if _, ok := a.hasIssuer(sk); !ok { c.closeConnection(AuthenticationViolation) } } } if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok && refreshImportingAccounts { s.incompleteAccExporterMap.Delete(old.Name) s.accounts.Range(func(key, value interface{}) bool { acc := value.(*Account) acc.mu.RLock() incomplete := acc.incomplete name := acc.Name // Must use jwt in account or risk failing on fetch // This jwt may not be the same that caused exportingAcc to be in incompleteAccExporterMap claimJWT := acc.claimJWT acc.mu.RUnlock() if incomplete && name != old.Name { if accClaims, _, err := s.verifyAccountClaims(claimJWT); err == nil { // Since claimJWT has not changed, acc can become complete // but it won't alter incomplete for it's dependents accounts. s.updateAccountClaimsWithRefresh(acc, accClaims, false) // old.Name was deleted before ranging over accounts // If it exists again, UpdateAccountClaims set it for failed imports of acc. // So there was one import of acc that imported this account and failed again. // Since this account just got updated, the import itself may be in error. So trace that. if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok { s.incompleteAccExporterMap.Delete(old.Name) s.Errorf("Account %s has issues importing account %s", name, old.Name) } } } return true }) } } // Helper to build an internal account structure from a jwt.AccountClaims. // Lock MUST NOT be held upon entry. func (s *Server) buildInternalAccount(ac *jwt.AccountClaims) *Account { acc := NewAccount(ac.Subject) acc.Issuer = ac.Issuer // Set this here since we are placing in s.tmpAccounts below and may be // referenced by an route RS+, etc. s.setAccountSublist(acc) // We don't want to register an account that is in the process of // being built, however, to solve circular import dependencies, we // need to store it here. s.tmpAccounts.Store(ac.Subject, acc) s.UpdateAccountClaims(acc, ac) return acc } // Helper to build Permissions from jwt.Permissions // or return nil if none were specified func buildPermissionsFromJwt(uc *jwt.Permissions) *Permissions { if uc == nil { return nil } var p *Permissions if len(uc.Pub.Allow) > 0 || len(uc.Pub.Deny) > 0 { if p == nil { p = &Permissions{} } p.Publish = &SubjectPermission{} p.Publish.Allow = uc.Pub.Allow p.Publish.Deny = uc.Pub.Deny } if len(uc.Sub.Allow) > 0 || len(uc.Sub.Deny) > 0 { if p == nil { p = &Permissions{} } p.Subscribe = &SubjectPermission{} p.Subscribe.Allow = uc.Sub.Allow p.Subscribe.Deny = uc.Sub.Deny } if uc.Resp != nil { if p == nil { p = &Permissions{} } p.Response = &ResponsePermission{ MaxMsgs: uc.Resp.MaxMsgs, Expires: uc.Resp.Expires, } validateResponsePermissions(p) } return p } // Helper to build internal NKeyUser. 
func buildInternalNkeyUser(uc *jwt.UserClaims, acts map[string]struct{}, acc *Account) *NkeyUser { nu := &NkeyUser{Nkey: uc.Subject, Account: acc, AllowedConnectionTypes: acts} if uc.IssuerAccount != "" { nu.SigningKey = uc.Issuer } // Now check for permissions. var p = buildPermissionsFromJwt(&uc.Permissions) if p == nil && acc.defaultPerms != nil { p = acc.defaultPerms.clone() } nu.Permissions = p return nu } const fetchTimeout = 2 * time.Second func fetchAccount(res AccountResolver, name string) (string, error) { if !nkeys.IsValidPublicAccountKey(name) { return "", fmt.Errorf("will only fetch valid account keys") } return res.Fetch(name) } // AccountResolver interface. This is to fetch Account JWTs by public nkeys type AccountResolver interface { Fetch(name string) (string, error) Store(name, jwt string) error IsReadOnly() bool Start(server *Server) error IsTrackingUpdate() bool Reload() error Close() } // Default implementations of IsReadOnly/Start so only need to be written when changed type resolverDefaultsOpsImpl struct{} func (*resolverDefaultsOpsImpl) IsReadOnly() bool { return true } func (*resolverDefaultsOpsImpl) IsTrackingUpdate() bool { return false } func (*resolverDefaultsOpsImpl) Start(*Server) error { return nil } func (*resolverDefaultsOpsImpl) Reload() error { return nil } func (*resolverDefaultsOpsImpl) Close() { } func (*resolverDefaultsOpsImpl) Store(_, _ string) error { return fmt.Errorf("Store operation not supported for URL Resolver") } // MemAccResolver is a memory only resolver. // Mostly for testing. type MemAccResolver struct { sm sync.Map resolverDefaultsOpsImpl } // Fetch will fetch the account jwt claims from the internal sync.Map. func (m *MemAccResolver) Fetch(name string) (string, error) { if j, ok := m.sm.Load(name); ok { return j.(string), nil } return _EMPTY_, ErrMissingAccount } // Store will store the account jwt claims in the internal sync.Map. func (m *MemAccResolver) Store(name, jwt string) error { m.sm.Store(name, jwt) return nil } func (ur *MemAccResolver) IsReadOnly() bool { return false } // URLAccResolver implements an http fetcher. type URLAccResolver struct { url string c *http.Client resolverDefaultsOpsImpl } // NewURLAccResolver returns a new resolver for the given base URL. func NewURLAccResolver(url string) (*URLAccResolver, error) { if !strings.HasSuffix(url, "/") { url += "/" } // FIXME(dlc) - Make timeout and others configurable. // We create our own transport to amortize TLS. tr := &http.Transport{ MaxIdleConns: 10, IdleConnTimeout: 30 * time.Second, } ur := &URLAccResolver{ url: url, c: &http.Client{Timeout: fetchTimeout, Transport: tr}, } return ur, nil } // Fetch will fetch the account jwt claims from the base url, appending the // account name onto the end. func (ur *URLAccResolver) Fetch(name string) (string, error) { url := ur.url + name resp, err := ur.c.Get(url) if err != nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, err) } else if resp == nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: no response", url) } else if resp.StatusCode != http.StatusOK { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, resp.Status) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return _EMPTY_, err } return string(body), nil } // Resolver based on nats for synchronization and backing directory for storage. 
type DirAccResolver struct { *DirJWTStore *Server syncInterval time.Duration } func (dr *DirAccResolver) IsTrackingUpdate() bool { return true } func (dr *DirAccResolver) Reload() error { return dr.DirJWTStore.Reload() } func respondToUpdate(s *Server, respSubj string, acc string, message string, err error) { if err == nil { if acc == "" { s.Debugf("%s", message) } else { s.Debugf("%s - %s", message, acc) } } else { if acc == "" { s.Errorf("%s - %s", message, err) } else { s.Errorf("%s - %s - %s", message, acc, err) } } if respSubj == "" { return } server := &ServerInfo{} response := map[string]interface{}{"server": server} m := map[string]interface{}{} if acc != "" { m["account"] = acc } if err == nil { m["code"] = http.StatusOK m["message"] = message response["data"] = m } else { m["code"] = http.StatusInternalServerError m["description"] = fmt.Sprintf("%s - %v", message, err) response["error"] = m } s.sendInternalMsgLocked(respSubj, _EMPTY_, server, response) } func handleListRequest(store *DirJWTStore, s *Server, reply string) { if reply == "" { return } accIds := make([]string, 0, 1024) if err := store.PackWalk(1, func(partialPackMsg string) { if tk := strings.Split(partialPackMsg, "|"); len(tk) == 2 { accIds = append(accIds, tk[0]) } }); err != nil { // let them timeout s.Errorf("list request error: %v", err) } else { s.Debugf("list request responded with %d account ids", len(accIds)) server := &ServerInfo{} response := map[string]interface{}{"server": server, "data": accIds} s.sendInternalMsgLocked(reply, _EMPTY_, server, response) } } func handleDeleteRequest(store *DirJWTStore, s *Server, msg []byte, reply string) { var accIds []interface{} var subj, sysAccName string if sysAcc := s.SystemAccount(); sysAcc != nil { sysAccName = sysAcc.GetName() } // TODO Can allow keys (issuer) to delete accounts they issued and operator key to delete all accounts. 
// For now only operator is allowed to delete gk, err := jwt.DecodeGeneric(string(msg)) if err == nil { subj = gk.Subject if store.deleteType == NoDelete { err = fmt.Errorf("delete must be enabled in server config") } else if subj != gk.Issuer { err = fmt.Errorf("not self signed") } else if !s.isTrustedIssuer(gk.Issuer) { err = fmt.Errorf("not trusted") } else if store.operator != gk.Issuer { err = fmt.Errorf("needs to be the operator operator") } else if list, ok := gk.Data["accounts"]; !ok { err = fmt.Errorf("malformed request") } else if accIds, ok = list.([]interface{}); !ok { err = fmt.Errorf("malformed request") } else { for _, entry := range accIds { if acc, ok := entry.(string); !ok || acc == "" || !nkeys.IsValidPublicAccountKey(acc) { err = fmt.Errorf("malformed request") break } else if acc == sysAccName { err = fmt.Errorf("not allowed to delete system account") break } } } } if err != nil { respondToUpdate(s, reply, "", fmt.Sprintf("delete accounts request by %s failed", subj), err) return } errs := []string{} passCnt := 0 for _, acc := range accIds { if err := store.delete(acc.(string)); err != nil { errs = append(errs, err.Error()) } else { passCnt++ } } if len(errs) == 0 { respondToUpdate(s, reply, "", fmt.Sprintf("deleted %d accounts", passCnt), nil) } else { respondToUpdate(s, reply, "", fmt.Sprintf("deleted %d accounts, failed for %d", passCnt, len(errs)), errors.New(strings.Join(errs, "<\n"))) } } func getOperator(s *Server) (string, error) { var op string if opts := s.getOpts(); opts != nil && len(opts.TrustedOperators) > 0 { op = opts.TrustedOperators[0].Subject } if op == "" { return "", fmt.Errorf("no operator found") } return op, nil } func (dr *DirAccResolver) Start(s *Server) error { op, err := getOperator(s) if err != nil { return err } dr.Lock() defer dr.Unlock() dr.Server = s dr.operator = op dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); !ok { } else if jwt, err := dr.LoadAcc(pubKey); err != nil { s.Errorf("update got error on load: %v", err) } else if err := s.updateAccountWithClaimJWT(v.(*Account), jwt); err != nil { s.Errorf("update resulted in error %v", err) } } packRespIb := s.newRespInbox() for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) { pubKey := "" tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { pubKey = tk[accReqAccIndex] } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { s.Debugf("jwt update skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else if claim.Subject != pubKey { err := errors.New("subject does not match jwt content") respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else if err := dr.save(pubKey, string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else { respondToUpdate(s, resp, pubKey, "jwt updated", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } } if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) { if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update resulted in error", err) } else if err := 
dr.save(claim.Subject, string(msg)); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt update resulted in error", err) } else { respondToUpdate(s, resp, claim.Subject, "jwt updated", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } // respond to lookups with our version if _, err := s.sysSubscribe(fmt.Sprintf(accLookupReqSubj, "*"), func(_ *subscription, _ *client, subj, reply string, msg []byte) { if reply == "" { return } tk := strings.Split(subj, tsep) if len(tk) != accLookupReqTokens { return } if theJWT, err := dr.DirJWTStore.LoadAcc(tk[accReqAccIndex]); err != nil { s.Errorf("Merging resulted in error: %v", err) } else { s.sendInternalMsgLocked(reply, "", nil, []byte(theJWT)) } }); err != nil { return fmt.Errorf("error setting up lookup request handling: %v", err) } // respond to pack requests with one or more pack messages // an empty message signifies the end of the response responder if _, err := s.sysSubscribeQ(accPackReqSubj, "responder", func(_ *subscription, _ *client, _, reply string, theirHash []byte) { if reply == "" { return } ourHash := dr.DirJWTStore.Hash() if bytes.Equal(theirHash, ourHash[:]) { s.sendInternalMsgLocked(reply, "", nil, []byte{}) s.Debugf("pack request matches hash %x", ourHash[:]) } else if err := dr.DirJWTStore.PackWalk(1, func(partialPackMsg string) { s.sendInternalMsgLocked(reply, "", nil, []byte(partialPackMsg)) }); err != nil { // let them timeout s.Errorf("pack request error: %v", err) } else { s.Debugf("pack request hash %x - finished responding with hash %x", theirHash, ourHash) s.sendInternalMsgLocked(reply, "", nil, []byte{}) } }); err != nil { return fmt.Errorf("error setting up pack request handling: %v", err) } // respond to list requests with one message containing all account ids if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up delete request handling: %v", err) } // embed pack responses into store if _, err := s.sysSubscribe(packRespIb, func(_ *subscription, _ *client, _, _ string, msg []byte) { hash := dr.DirJWTStore.Hash() if len(msg) == 0 { // end of response stream s.Debugf("Merging Finished and resulting in: %x", dr.DirJWTStore.Hash()) return } else if err := dr.DirJWTStore.Merge(string(msg)); err != nil { s.Errorf("Merging resulted in error: %v", err) } else { s.Debugf("Merging succeeded and changed %x to %x", hash, dr.DirJWTStore.Hash()) } }); err != nil { return fmt.Errorf("error setting up pack response handling: %v", err) } // periodically send out pack message quit := s.quitCh s.startGoRoutine(func() { defer s.grWG.Done() ticker := time.NewTicker(dr.syncInterval) for { select { case <-quit: ticker.Stop() return case <-ticker.C: } ourHash := dr.DirJWTStore.Hash() s.Debugf("Checking store state: %x", ourHash) s.sendInternalMsgLocked(accPackReqSubj, packRespIb, nil, ourHash[:]) } }) s.Noticef("Managing all jwt in exclusive directory %s", dr.directory) return nil } func (dr *DirAccResolver) Fetch(name string) (string, error) { if theJWT, err := dr.LoadAcc(name); theJWT != "" { return theJWT, nil } else { dr.Lock() srv := dr.Server dr.Unlock() if srv == nil { return "", err } return 
srv.fetch(dr, name) // lookup from other server } } func (dr *DirAccResolver) Store(name, jwt string) error { return dr.saveIfNewer(name, jwt) } func NewDirAccResolver(path string, limit int64, syncInterval time.Duration, delete bool) (*DirAccResolver, error) { if limit == 0 { limit = math.MaxInt64 } if syncInterval <= 0 { syncInterval = time.Minute } deleteType := NoDelete if delete { deleteType = RenameDeleted } store, err := NewExpiringDirJWTStore(path, false, true, deleteType, 0, limit, false, 0, nil) if err != nil { return nil, err } return &DirAccResolver{store, nil, syncInterval}, nil } // Caching resolver using nats for lookups and making use of a directory for storage type CacheDirAccResolver struct { DirAccResolver ttl time.Duration } func (s *Server) fetch(res AccountResolver, name string) (string, error) { if s == nil { return "", ErrNoAccountResolver } respC := make(chan []byte, 1) accountLookupRequest := fmt.Sprintf(accLookupReqSubj, name) s.mu.Lock() if s.sys == nil || s.sys.replies == nil { s.mu.Unlock() return "", fmt.Errorf("eventing shut down") } replySubj := s.newRespInbox() replies := s.sys.replies // Store our handler. replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) { clone := make([]byte, len(msg)) copy(clone, msg) s.mu.Lock() if _, ok := replies[replySubj]; ok { select { case respC <- clone: // only use first response and only if there is still interest default: } } s.mu.Unlock() } s.sendInternalMsg(accountLookupRequest, replySubj, nil, []byte{}) quit := s.quitCh s.mu.Unlock() var err error var theJWT string select { case <-quit: err = errors.New("fetching jwt failed due to shutdown") case <-time.After(fetchTimeout): err = errors.New("fetching jwt timed out") case m := <-respC: if err = res.Store(name, string(m)); err == nil { theJWT = string(m) } } s.mu.Lock() delete(replies, replySubj) s.mu.Unlock() close(respC) return theJWT, err } func NewCacheDirAccResolver(path string, limit int64, ttl time.Duration, _ ...dirJWTStoreOption) (*CacheDirAccResolver, error) { if limit <= 0 { limit = 1_000 } store, err := NewExpiringDirJWTStore(path, false, true, HardDelete, 0, limit, true, ttl, nil) if err != nil { return nil, err } return &CacheDirAccResolver{DirAccResolver{store, nil, 0}, ttl}, nil } func (dr *CacheDirAccResolver) Start(s *Server) error { op, err := getOperator(s) if err != nil { return err } dr.Lock() defer dr.Unlock() dr.Server = s dr.operator = op dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); !ok { } else if jwt, err := dr.LoadAcc(pubKey); err != nil { s.Errorf("update got error on load: %v", err) } else if err := s.updateAccountWithClaimJWT(v.(*Account), jwt); err != nil { s.Errorf("update resulted in error %v", err) } } for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) { pubKey := "" tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { pubKey = tk[accReqAccIndex] } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { s.Debugf("jwt update cache skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else if claim.Subject != pubKey { err := errors.New("subject does not match jwt content") respondToUpdate(s, resp, 
pubKey, "jwt update cache resulted in error", err) } else if _, ok := s.accounts.Load(pubKey); !ok { respondToUpdate(s, resp, pubKey, "jwt update cache skipped", nil) } else if err := dr.save(pubKey, string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else { respondToUpdate(s, resp, pubKey, "jwt updated cache", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } } if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) { if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update cache resulted in error", err) } else if _, ok := s.accounts.Load(claim.Subject); !ok { respondToUpdate(s, resp, claim.Subject, "jwt update cache skipped", nil) } else if err := dr.save(claim.Subject, string(msg)); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt update cache resulted in error", err) } else { respondToUpdate(s, resp, claim.Subject, "jwt updated cache", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } // respond to list requests with one message containing all account ids if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } s.Noticef("Managing some jwt in exclusive directory %s", dr.directory) return nil } func (dr *CacheDirAccResolver) Reload() error { return dr.DirAccResolver.Reload() } // Transforms for arbitrarily mapping subjects from one to another for maps, tees and filters. // These can also be used for proper mapping on wildcard exports/imports. // These will be grouped and caching and locking are assumed to be in the upper layers. type transform struct { src, dest string dtoks []string stoks []string dtpi []int8 } // Helper to pull raw place holder index. Returns -1 if not a place holder. func placeHolderIndex(token string) int { if len(token) > 1 && token[0] == '$' { var tp int if n, err := fmt.Sscanf(token, "$%d", &tp); err == nil && n == 1 { return tp } } return -1 } // newTransform will create a new transform checking the src and dest subjects for accuracy. func newTransform(src, dest string) (*transform, error) { // Both entries need to be valid subjects. sv, stokens, npwcs, hasFwc := subjectInfo(src) dv, dtokens, dnpwcs, dHasFwc := subjectInfo(dest) // Make sure both are valid, match fwc if present and there are no pwcs in the dest subject. if !sv || !dv || dnpwcs > 0 || hasFwc != dHasFwc { return nil, ErrBadSubject } var dtpi []int8 // If the src has partial wildcards then the dest needs to have the token place markers. if npwcs > 0 || hasFwc { // We need to count to make sure that the dest has token holders for the pwcs. sti := make(map[int]int) for i, token := range stokens { if len(token) == 1 && token[0] == pwc { sti[len(sti)+1] = i } } nphs := 0 for _, token := range dtokens { tp := placeHolderIndex(token) if tp >= 0 { if tp > npwcs { return nil, ErrBadSubject } nphs++ // Now build up our runtime mapping from dest to source tokens. 
dtpi = append(dtpi, int8(sti[tp])) } else { dtpi = append(dtpi, -1) } } if nphs != npwcs { return nil, ErrBadSubject } } return &transform{src: src, dest: dest, dtoks: dtokens, stoks: stokens, dtpi: dtpi}, nil } // match will take a literal published subject that is associated with a client and will match and transform // the subject if possible. // TODO(dlc) - We could add in client here to allow for things like foo -> foo.$ACCOUNT func (tr *transform) match(subject string) (string, error) { // Tokenize the subject. This should always be a literal subject. tsa := [32]string{} tts := tsa[:0] start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) if !isValidLiteralSubject(tts) { return "", ErrBadSubject } if isSubsetMatch(tts, tr.src) { return tr.transform(tts) } return "", ErrNoTransforms } // Do not need to match, just transform. func (tr *transform) transformSubject(subject string) (string, error) { // Tokenize the subject. tsa := [32]string{} tts := tsa[:0] start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) return tr.transform(tts) } // Do a transform on the subject to the dest subject. func (tr *transform) transform(tokens []string) (string, error) { if len(tr.dtpi) == 0 { return tr.dest, nil } var b strings.Builder var token string // We need to walk destination tokens and create the mapped subject pulling tokens from src. // This is slow and that is ok, transforms should have caching layer in front for mapping transforms // and export/import semantics with streams and services. li := len(tr.dtpi) - 1 for i, index := range tr.dtpi { // <0 means use destination token. if index < 0 { token = tr.dtoks[i] // Break if fwc if len(token) == 1 && token[0] == fwc { break } } else { // >= 0 means use source map index to figure out which source token to pull. token = tokens[index] } b.WriteString(token) if i < li { b.WriteByte(btsep) } } // We may have more source tokens available. This happens with ">". if tr.dtoks[len(tr.dtoks)-1] == ">" { for sli, i := len(tokens)-1, len(tr.stoks)-1; i < len(tokens); i++ { b.WriteString(tokens[i]) if i < sli { b.WriteByte(btsep) } } } return b.String(), nil } // Reverse a transform. func (tr *transform) reverse() *transform { if len(tr.dtpi) == 0 { rtr, _ := newTransform(tr.dest, tr.src) return rtr } // If we are here we need to dynamically get the correct reverse // of this transform. nsrc, phs := transformUntokenize(tr.dest) var nda []string for _, token := range tr.stoks { if token == "*" { if len(phs) == 0 { // TODO(dlc) - Should not happen return nil } nda = append(nda, phs[0]) phs = phs[1:] } else { nda = append(nda, token) } } ndest := strings.Join(nda, tsep) rtr, _ := newTransform(nsrc, ndest) return rtr }
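The transform code at the end of the file above rewrites subjects by capturing '*' wildcard tokens and substituting them through "$n" placeholders. A minimal, self-contained Go sketch of that idea, assuming equal token counts and using a hypothetical mapSubject helper (an illustration of the placeholder technique, not the server's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// mapSubject is a hypothetical helper: tokens matched by '*' in src are
// captured in order and substituted into dest wherever "$1", "$2", ... appear.
func mapSubject(src, dest, subject string) (string, error) {
	stoks := strings.Split(src, ".")
	subj := strings.Split(subject, ".")
	if len(stoks) != len(subj) {
		return "", fmt.Errorf("subject %q does not match %q", subject, src)
	}
	// Collect the literal tokens captured by each '*' wildcard, in order.
	var captures []string
	for i, t := range stoks {
		switch {
		case t == "*":
			captures = append(captures, subj[i])
		case t != subj[i]:
			return "", fmt.Errorf("subject %q does not match %q", subject, src)
		}
	}
	// Rewrite the destination, replacing "$n" with the n-th capture.
	dtoks := strings.Split(dest, ".")
	out := make([]string, len(dtoks))
	for i, t := range dtoks {
		var n int
		// "$2" parses as n == 2; plain literals fail the scan and pass through.
		if _, err := fmt.Sscanf(t, "$%d", &n); err == nil && n >= 1 && n <= len(captures) {
			out[i] = captures[n-1]
		} else {
			out[i] = t
		}
	}
	return strings.Join(out, "."), nil
}

func main() {
	// "foo.*.*" -> "bar.$2.$1" swaps the two captured tokens.
	fmt.Println(mapSubject("foo.*.*", "bar.$2.$1", "foo.a.b"))
}

Running it prints "bar.b.a <nil>": the two tokens captured by the wildcards in "foo.*.*" are swapped into the positions named by "$2" and "$1" in the destination.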
1
12,285
But now you may get DATA RACE reports because c.acc is sometimes changed. I wonder if we should rework that whole approach of sending subs through routes.
nats-io-nats-server
go
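The review message for this record flags a potential data race on c.acc. A hedged sketch of the conventional fix, reading and swapping the pointer only through accessors that hold a sync.RWMutex; client and Account here are simplified stand-ins, not the server's real structs:

package main

import (
	"fmt"
	"sync"
)

type Account struct{ Name string }

type client struct {
	mu  sync.RWMutex
	acc *Account
}

// account reads the shared pointer under the read lock.
func (c *client) account() *Account {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.acc
}

// registerWithAccount swaps the shared pointer under the write lock.
func (c *client) registerWithAccount(a *Account) {
	c.mu.Lock()
	c.acc = a
	c.mu.Unlock()
}

func main() {
	c := &client{}
	var wg sync.WaitGroup
	// Concurrent writers and readers no longer race on c.acc.
	for i := 0; i < 4; i++ {
		wg.Add(2)
		go func(i int) {
			defer wg.Done()
			c.registerWithAccount(&Account{Name: fmt.Sprintf("A%d", i)})
		}(i)
		go func() {
			defer wg.Done()
			if a := c.account(); a != nil {
				_ = a.Name
			}
		}()
	}
	wg.Wait()
	fmt.Println("final account:", c.account().Name)
}

With this access discipline the race detector (go run -race) stays quiet, at the cost of taking the lock on every account lookup.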
@@ -194,7 +194,18 @@ std::shared_ptr<Engine> ADIOS::Open(const std::string &name, "HDF5 library, can't use HDF5\n"); #endif } - + else if (type == "HDF5Reader") // -Junmin + { +//#if defined(ADIOS_HAVE_PHDF5) && defined(ADIOS_HAVE_MPI) +#ifdef ADIOS_HAVE_PHDF5 + //#ifdef ADIOS_HAVE_MPI + return std::make_shared<HDF5Reader>(*this, name, accessMode, mpiComm, + method); +#else + throw std::invalid_argument("ERROR: this version didn't compile with " + "HDF5 library, can't use HDF5\n"); +#endif + } else { if (m_DebugMode == true)
1
/* * Distributed under the OSI-approved Apache License, Version 2.0. See * accompanying file Copyright.txt for details. * * ADIOS.cpp * * Created on: Sep 29, 2016 * Author: William F Godoy */ #include "ADIOS.h" #include "ADIOS.tcc" #include <fstream> #include <ios> //std::ios_base::failure #include <iostream> #include <sstream> #include <utility> #include "adios2/ADIOSMPI.h" #include "adios2/ADIOSMacros.h" #include "adios2/core/adiosFunctions.h" #include "adios2/engine/bp/BPFileReader.h" #include "adios2/engine/bp/BPFileWriter.h" #ifdef ADIOS2_HAVE_DATAMAN // external dependencies #include "adios2/engine/dataman/DataManReader.h" #include "adios2/engine/dataman/DataManWriter.h" #endif #ifdef ADIOS2_HAVE_ADIOS1 // external dependencies #include "adios2/engine/adios1/ADIOS1Reader.h" #include "adios2/engine/adios1/ADIOS1Writer.h" #endif #ifdef ADIOS2_HAVE_HDF5 // external dependencies #include "adios2/engine/hdf5/HDF5ReaderP.h" #include "adios2/engine/hdf5/HDF5WriterP.h" #endif namespace adios { ADIOS::ADIOS(const Verbose verbose, const bool debugMode) : ADIOS("", MPI_COMM_SELF, verbose, debugMode) { } ADIOS::ADIOS(const std::string config, const Verbose verbose, const bool debugMode) : ADIOS(config, MPI_COMM_SELF, verbose, debugMode) { } ADIOS::ADIOS(const std::string configFile, MPI_Comm mpiComm, const Verbose verbose, const bool debugMode) : m_MPIComm(mpiComm), m_ConfigFile(configFile), m_DebugMode(debugMode) { InitMPI(); // InitXML( m_XMLConfigFile, m_MPIComm, m_DebugMode, m_HostLanguage, // m_Transforms, m_Groups ); } ADIOS::ADIOS(MPI_Comm mpiComm, const Verbose verbose, const bool debugMode) : ADIOS("", mpiComm, verbose, debugMode) { } // ADIOS::~ADIOS() {} void ADIOS::InitMPI() { if (m_DebugMode == true) { if (m_MPIComm == MPI_COMM_NULL) { throw std::ios_base::failure( "ERROR: engine communicator is MPI_COMM_NULL," " in call to ADIOS Open or Constructor\n"); } } MPI_Comm_rank(m_MPIComm, &m_RankMPI); MPI_Comm_size(m_MPIComm, &m_SizeMPI); } Method &ADIOS::DeclareMethod(const std::string methodName) { if (m_DebugMode == true) { if (m_Methods.count(methodName) == 1) { throw std::invalid_argument( "ERROR: method " + methodName + " already declared, from DeclareMethod\n"); } } m_Methods.emplace(methodName, Method(methodName, m_DebugMode)); return m_Methods.at(methodName); } std::shared_ptr<Engine> ADIOS::Open(const std::string &name, const std::string accessMode, MPI_Comm mpiComm, const Method &method) { if (m_DebugMode == true) { if (m_EngineNames.count(name) == 1) // Check if Engine already exists { throw std::invalid_argument( "ERROR: engine name " + name + " already created by Open, in call from Open.\n"); } } m_EngineNames.insert(name); const std::string type(method.m_Type); const bool isDefaultWriter = (accessMode == "w" || accessMode == "write" || accessMode == "a" || accessMode == "append") && type.empty() ? true : false; const bool isDefaultReader = (accessMode == "r" || accessMode == "read") && type.empty() ? 
true : false; if (isDefaultWriter || type == "BPFileWriter" || type == "bpfilewriter") { return std::make_shared<BPFileWriter>(*this, name, accessMode, mpiComm, method); } else if (isDefaultReader || type == "BPReader" || type == "bpreader") { return std::make_shared<BPFileReader>(*this, name, accessMode, mpiComm, method); } else if (type == "SIRIUS" || type == "sirius" || type == "Sirius") { // not yet supported // return std::make_shared<engine::DataMan>( *this, name, accessMode, // mpiComm, method, iomode, timeout_sec, m_DebugMode, method.m_nThreads // ); } else if (type == "DataManWriter") { #ifdef ADIOS2_HAVE_DATAMAN return std::make_shared<DataManWriter>(*this, name, accessMode, mpiComm, method); #else throw std::invalid_argument( "ERROR: this version didn't compile with " "Dataman library, can't Open DataManWriter\n"); #endif } else if (type == "DataManReader") { #ifdef ADIOS2_HAVE_DATAMAN return std::make_shared<DataManReader>(*this, name, accessMode, mpiComm, method); #else throw std::invalid_argument( "ERROR: this version didn't compile with " "Dataman library, can't Open DataManReader\n"); #endif } else if (type == "ADIOS1Writer") { #ifdef ADIOS2_HAVE_ADIOS1 return std::make_shared<ADIOS1Writer>(*this, name, accessMode, mpiComm, method); #else throw std::invalid_argument( "ERROR: this version didn't compile with ADIOS " "1.x library, can't Open ADIOS1Writer\n"); #endif } else if (type == "Vis") { // return std::make_shared<Vis>( *this, name, accessMode, mpiComm, // method, // iomode, timeout_sec, m_DebugMode, method.m_nThreads ); } else if (type == "HDF5Writer") // -junmin { #ifdef ADIOS2_HAVE_HDF5 return std::make_shared<HDF5Writer>(*this, name, accessMode, mpiComm, method); #else throw std::invalid_argument("ERROR: this version didn't compile with " "HDF5 library, can't use HDF5\n"); #endif } else { if (m_DebugMode == true) { throw std::invalid_argument("ERROR: method type " + type + " not supported for " + name + ", in call to Open\n"); } } return nullptr; // if debug mode is off } std::shared_ptr<Engine> ADIOS::Open(const std::string &name, const std::string accessMode, const Method &method) { return Open(name, accessMode, m_MPIComm, method); } std::shared_ptr<Engine> ADIOS::Open(const std::string &name, const std::string accessMode, MPI_Comm mpiComm, const std::string methodName) { auto itMethod = m_Methods.find(methodName); if (m_DebugMode == true) { CheckMethod(itMethod, methodName, " in call to Open\n"); } return Open(name, accessMode, mpiComm, itMethod->second); } std::shared_ptr<Engine> ADIOS::Open(const std::string &name, const std::string accessMode, const std::string methodName) { return Open(name, accessMode, m_MPIComm, methodName); } std::shared_ptr<Engine> ADIOS::OpenFileReader(const std::string &fileName, MPI_Comm mpiComm, const Method &method) { return Open(fileName, "r", mpiComm, method); } std::shared_ptr<Engine> ADIOS::OpenFileReader(const std::string &name, MPI_Comm mpiComm, const std::string methodName) { auto itMethod = m_Methods.find(methodName); if (m_DebugMode == true) { CheckMethod(itMethod, methodName, " in call to Open\n"); } return Open(name, "r", m_MPIComm, itMethod->second); } VariableCompound &ADIOS::GetVariableCompound(const std::string &name) { return m_Compound.at(GetVariableIndex<void>(name)); } void ADIOS::MonitorVariables(std::ostream &logStream) { logStream << "\tVariable \t Type\n"; for (auto &variablePair : m_Variables) { const std::string name(variablePair.first); const std::string type(variablePair.second.first); if (type == 
GetType<char>()) { GetVariable<char>(name).Monitor(logStream); } else if (type == GetType<unsigned char>()) { GetVariable<unsigned char>(name).Monitor(logStream); } else if (type == GetType<short>()) { GetVariable<short>(name).Monitor(logStream); } else if (type == GetType<unsigned short>()) { GetVariable<unsigned short>(name).Monitor(logStream); } else if (type == GetType<int>()) { GetVariable<int>(name).Monitor(logStream); } else if (type == GetType<unsigned int>()) { GetVariable<unsigned int>(name).Monitor(logStream); } else if (type == GetType<long int>()) { GetVariable<long int>(name).Monitor(logStream); } else if (type == GetType<unsigned long int>()) { GetVariable<unsigned long int>(name).Monitor(logStream); } else if (type == GetType<long long int>()) { GetVariable<long long int>(name).Monitor(logStream); } else if (type == GetType<unsigned long long int>()) { GetVariable<unsigned long long int>(name).Monitor(logStream); } else if (type == GetType<float>()) { GetVariable<float>(name).Monitor(logStream); } else if (type == GetType<double>()) { GetVariable<double>(name).Monitor(logStream); } else if (type == GetType<long double>()) { GetVariable<long double>(name).Monitor(logStream); } else if (type == GetType<std::complex<float>>()) { GetVariable<std::complex<float>>(name).Monitor(logStream); } else if (type == GetType<std::complex<double>>()) { GetVariable<std::complex<double>>(name).Monitor(logStream); } else if (type == GetType<std::complex<long double>>()) { GetVariable<std::complex<long double>>(name).Monitor(logStream); } } } // PRIVATE FUNCTIONS BELOW void ADIOS::CheckVariableInput(const std::string &name, const Dims &dimensions) const { if (m_DebugMode == true) { if (m_Variables.count(name) == 1) { throw std::invalid_argument( "ERROR: variable " + name + " already exists, in call to DefineVariable\n"); } if (dimensions.empty() == true) { throw std::invalid_argument( "ERROR: variable " + name + " dimensions can't be empty, in call to DefineVariable\n"); } } } void ADIOS::CheckVariableName( std::map<std::string, std::pair<std::string, unsigned int>>::const_iterator itVariable, const std::string &name, const std::string hint) const { if (m_DebugMode == true) { if (itVariable == m_Variables.end()) { throw std::invalid_argument("ERROR: variable " + name + " does not exist " + hint + "\n"); } } } void ADIOS::CheckMethod(std::map<std::string, Method>::const_iterator itMethod, const std::string methodName, const std::string hint) const { if (itMethod == m_Methods.end()) { throw std::invalid_argument("ERROR: method " + methodName + " not found " + hint + "\n"); } } //------------------------------------------------------------------------------ // Explicitly instantiate the necessary template implementations #define define_template_instantiation(T) \ template Variable<T> &ADIOS::DefineVariable<T>( \ const std::string &, const Dims, const Dims, const Dims); \ \ template Variable<T> &ADIOS::GetVariable<T>(const std::string &); ADIOS_FOREACH_TYPE_1ARG(define_template_instantiation) template unsigned int ADIOS::GetVariableIndex<void>(const std::string &); #undef define_template_instatiation //------------------------------------------------------------------------------ } // end namespace adios
1
11,481
Use `ADIOS2_HAVE_HDF5`, not `ADIOS_HAVE_PHDF5`
ornladios-ADIOS2
cpp
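The patch in this record gates a new engine behind a compile-time define, and the review corrects the macro name. In Go, the analogous guard is usually a registry that optional engines add themselves to from a build-tagged file; a sketch under that assumption, with hypothetical names (Engine, Register, NewEngine):

package main

import (
	"fmt"
)

type Engine interface{ Open(name string) error }

var registry = map[string]func() Engine{}

// Register is what an optional engine would call from init() in a file
// guarded by a build tag such as //go:build hdf5.
func Register(kind string, ctor func() Engine) { registry[kind] = ctor }

// NewEngine mirrors the #ifdef pattern: kinds compiled out of this build
// fail with a clear message instead of a missing symbol.
func NewEngine(kind string) (Engine, error) {
	ctor, ok := registry[kind]
	if !ok {
		return nil, fmt.Errorf("engine %q not supported in this build", kind)
	}
	return ctor(), nil
}

type hdf5Reader struct{}

func (hdf5Reader) Open(name string) error { return nil }

func main() {
	// Registered inline here only to exercise the factory.
	Register("HDF5Reader", func() Engine { return hdf5Reader{} })
	if e, err := NewEngine("HDF5Reader"); err == nil {
		_ = e.Open("data.h5")
		fmt.Println("opened HDF5Reader")
	}
	if _, err := NewEngine("SIRIUS"); err != nil {
		fmt.Println(err)
	}
}

The unsupported case then fails with a descriptive error at open time, which mirrors what the throw in the C++ factory's #else branch does.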
@@ -282,6 +282,7 @@ module RSpec context "with a non-string and a string" do it "concats the args" do expect(group_value_for Object, 'group').to eq("Object group") + expect(group_value_for 'group', Object).to eq("group Object") end end
1
require 'spec_helper' module RSpec module Core RSpec.describe Metadata do describe '.relative_path' do let(:here) { File.expand_path(".") } it "transforms absolute paths to relative paths" do expect(Metadata.relative_path(here)).to eq "." end it "transforms absolute paths to relative paths anywhere in its argument" do expect(Metadata.relative_path("foo #{here} bar")).to eq "foo . bar" end it "returns nil if passed an unparseable file:line combo" do expect(Metadata.relative_path("-e:1")).to be_nil end # I have no idea what line = line.sub(/\A([^:]+:\d+)$/, '\\1') is supposed to do it "gracefully returns nil if run in a secure thread" do safely do value = Metadata.relative_path(".") # on some rubies, File.expand_path is not a security error, so accept "." as well expect([nil, "."]).to include(value) end end end context "when created" do Metadata::RESERVED_KEYS.each do |key| it "prohibits :#{key} as a hash key for an example group" do expect { RSpec.describe("group", key => {}) }.to raise_error(/:#{key} is not allowed/) end it "prohibits :#{key} as a hash key for an example" do group = RSpec.describe("group") expect { group.example("example", key => {}) }.to raise_error(/:#{key} is not allowed/) end end it "uses :caller if passed as part of the user metadata" do m = nil RSpec.describe('group', :caller => ['example_file:42']) do m = metadata end expect(m[:location]).to eq("example_file:42") end end context "for an example" do let(:line_number) { __LINE__ + 3 } def metadata_for(*args) RSpec.describe("group description") do return example(*args).metadata end end alias example_metadata metadata_for RSpec::Matchers.define :have_value do |value| chain(:for) { |key| @key = key } match do |metadata| expect(metadata.fetch(@key)).to eq(value) expect(metadata[@key]).to eq(value) end end it "stores the description args" do expect(metadata_for "example description").to have_value(["example description"]).for(:description_args) end it "ignores nil description args" do expect(example_metadata).to have_value([]).for(:description_args) end it "stores the full_description (group description + example description)" do expect(metadata_for "example description").to have_value("group description example description").for(:full_description) end it "creates an empty execution result" do expect(example_metadata[:execution_result].to_h.reject { |_, v| v.nil? 
          }
        ).to eq({})
      end

      it "extracts file path from caller" do
        expect(example_metadata).to have_value(relative_path(__FILE__)).for(:file_path)
      end

      it "extracts line number from caller" do
        expect(example_metadata).to have_value(line_number).for(:line_number)
      end

      it "extracts location from caller" do
        expect(example_metadata).to have_value("#{relative_path(__FILE__)}:#{line_number}").for(:location)
      end

      it "uses :caller if passed as an option" do
        example_metadata = metadata_for('example description', :caller => ['example_file:42'])
        expect(example_metadata).to have_value("example_file:42").for(:location)
      end

      it "merges arbitrary options" do
        expect(metadata_for("desc", :arbitrary => :options)).to have_value(:options).for(:arbitrary)
      end

      it "points :example_group to the same hash object as other examples in the same group" do
        a = b = nil

        RSpec.describe "group" do
          a = example("foo").metadata[:example_group]
          b = example("bar").metadata[:example_group]
        end

        a[:description] = "new description"

        pending "Cannot maintain this and provide full `:example_group` backwards compatibility (see GH #1490):("
        expect(b[:description]).to eq("new description")
      end

      it 'does not include example-group specific keys' do
        metadata = nil

        RSpec.describe "group" do
          context "nested" do
            metadata = example("foo").metadata
          end
        end

        expect(metadata.keys).not_to include(:parent_example_group)
      end
    end

    describe ":block" do
      context "for example group metadata" do
        it "contains the example group block" do
          block = Proc.new { }
          group = RSpec.describe("group", &block)
          expect(group.metadata[:block]).to equal(block)
        end
      end

      context "for example metadata" do
        it "contains the example block" do
          block = Proc.new { }
          group = RSpec.describe("group")
          example = group.example("example", &block)
          expect(example.metadata[:block]).to equal(block)
        end
      end
    end

    describe ":described_class" do
      value_from = lambda do |group|
        group.metadata[:described_class]
      end

      context "in an outer group" do
        define_method :value_for do |arg|
          value_from[RSpec.describe(arg)]
        end

        context "with a String" do
          it "returns nil" do
            expect(value_for "group").to be_nil
          end
        end

        context "with a Symbol" do
          it "returns the symbol" do
            expect(value_for :group).to be(:group)
          end
        end

        context "with a class" do
          it "returns the class" do
            expect(value_for String).to be(String)
          end
        end
      end

      context "in a nested group" do
        it "inherits the parent group's described class" do
          value = nil

          RSpec.describe(Hash) do
            describe "sub context" do
              value = value_from[self]
            end
          end

          expect(value).to be(Hash)
        end

        it "sets the described class when passing a class" do
          value = nil

          RSpec.describe(String) do
            describe Array do
              value = value_from[self]
            end
          end

          expect(value).to be(Array)
        end

        it 'does not override the :described_class when passing no describe args' do
          value = nil

          RSpec.describe(String) do
            describe do
              value = value_from[self]
            end
          end

          expect(value).to be(String)
        end

        it "can override a parent group's described class using metadata" do
          parent_value = child_value = grandchild_value = nil

          RSpec.describe(String) do
            parent_value = value_from[self]

            describe "sub context" do
              metadata[:described_class] = Hash
              child_value = value_from[self]

              describe "sub context" do
                grandchild_value = value_from[self]
              end
            end
          end

          expect(grandchild_value).to be(Hash)
          expect(child_value).to be(Hash)
          expect(parent_value).to be(String)
        end
      end
    end

    describe ":description" do
      context "on an example" do
        it "just has the example description" do
          value = nil

          RSpec.describe "group" do
            value = example("example").metadata[:description]
          end

          expect(value).to eq("example")
        end
      end

      context "on a group" do
        def group_value_for(*args)
          value = nil

          RSpec.describe(*args) do
            value = metadata[:description]
          end

          value
        end

        context "with a string" do
          it "provides the submitted description" do
            expect(group_value_for "group").to eq("group")
          end
        end

        context "with a non-string" do
          it "provides the string form of the submitted object" do
            expect(group_value_for Hash).to eq("Hash")
          end
        end

        context "with a non-string and a string" do
          it "concats the args" do
            expect(group_value_for Object, 'group').to eq("Object group")
          end
        end

        context "with empty args" do
          it "returns empty string for [:description]" do
            expect(group_value_for()).to eq("")
          end
        end
      end
    end

    describe ":full_description" do
      context "on an example" do
        it "concats example group name and description" do
          value = nil

          RSpec.describe "group" do
            value = example("example").metadata[:full_description]
          end

          expect(value).to eq("group example")
        end
      end

      it "concats nested example group descriptions" do
        group_value = example_value = nil

        RSpec.describe "parent" do
          describe "child" do
            group_value = metadata[:full_description]
            example_value = example("example").metadata[:full_description]
          end
        end

        expect(group_value).to eq("parent child")
        expect(example_value).to eq("parent child example")
      end

      it "concats nested example group descriptions three deep" do
        grandparent_value = parent_value = child_value = example_value = nil

        RSpec.describe "grandparent" do
          grandparent_value = metadata[:full_description]

          describe "parent" do
            parent_value = metadata[:full_description]

            describe "child" do
              child_value = metadata[:full_description]
              example_value = example("example").metadata[:full_description]
            end
          end
        end

        expect(grandparent_value).to eq("grandparent")
        expect(parent_value).to eq("grandparent parent")
        expect(child_value).to eq("grandparent parent child")
        expect(example_value).to eq("grandparent parent child example")
      end

      %w[# . ::].each do |char|
        context "with a 2nd arg starting with #{char}" do
          it "removes the space" do
            value = nil

            RSpec.describe Array, "#{char}method" do
              value = metadata[:full_description]
            end

            expect(value).to eq("Array#{char}method")
          end
        end

        context "with a description starting with #{char} nested under a module" do
          it "removes the space" do
            value = nil

            RSpec.describe Object do
              describe "#{char}method" do
                value = metadata[:full_description]
              end
            end

            expect(value).to eq("Object#{char}method")
          end
        end

        context "with a description starting with #{char} nested under a context string" do
          it "does not remove the space" do
            value = nil

            RSpec.describe(Array) do
              context "with 2 items" do
                describe "#{char}method" do
                  value = metadata[:full_description]
                end
              end
            end

            expect(value).to eq("Array with 2 items #{char}method")
          end
        end
      end
    end

    describe ":file_path" do
      it "finds the first non-rspec lib file in the caller array" do
        value = nil

        RSpec.describe(:caller => ["./lib/rspec/core/foo.rb", "#{__FILE__}:#{__LINE__}"]) do
          value = metadata[:file_path]
        end

        expect(value).to eq(relative_path(__FILE__))
      end
    end

    describe ":line_number" do
      def value_for(*args)
        value = nil

        @describe_line = __LINE__ + 1
        RSpec.describe("group", *args) do
          value = metadata[:line_number]
        end

        value
      end

      it "finds the line number with the first non-rspec lib file in the backtrace" do
        expect(value_for()).to eq(@describe_line)
      end

      it "finds the line number with the first spec file with drive letter" do
        expect(value_for(:caller => [ "C:/path/to/file_spec.rb:#{__LINE__}" ])).to eq(__LINE__)
      end

      it "uses the number after the first : for ruby 1.9" do
        expect(value_for(:caller => [ "#{__FILE__}:#{__LINE__}:999" ])).to eq(__LINE__)
      end
    end

    describe "child example group" do
      it "nests the parent's example group metadata" do
        child = parent = nil

        RSpec.describe Object, "parent" do
          parent = metadata
          describe { child = metadata }
        end

        expect(child[:parent_example_group]).to eq(parent)
      end
    end

    it 'does not have a `:parent_example_group` key for a top level group' do
      meta = RSpec.describe(Object).metadata
      expect(meta).not_to include(:parent_example_group)
    end

    describe "backwards compatibility" do
      before { allow_deprecation }

      describe ":example_group" do
        it 'issues a deprecation warning when the `:example_group` key is accessed' do
          expect_deprecation_with_call_site(__FILE__, __LINE__ + 2, /:example_group/)
          RSpec.describe(Object, "group") do
            metadata[:example_group]
          end
        end

        it 'does not issue a deprecation warning when :example_group is accessed while applying configured filterings' do
          RSpec.configuration.include Module.new, :example_group => { :file_path => /.*/ }
          expect_no_deprecation
          RSpec.describe(Object, "group")
        end

        it 'can still access the example group attributes via [:example_group]' do
          meta = nil
          RSpec.describe(Object, "group") { meta = metadata }

          expect(meta[:example_group][:line_number]).to eq(__LINE__ - 2)
          expect(meta[:example_group][:description]).to eq("Object group")
        end

        it 'can access the parent example group attributes via [:example_group][:example_group]' do
          parent = child = nil
          parent_line = __LINE__ + 1
          RSpec.describe(Object, "group", :foo => 3) do
            parent = metadata
            describe("nested") { child = metadata }
          end

          expect(child[:example_group][:example_group].to_h).to include(
            :foo => 3,
            :description => "Object group",
            :line_number => parent_line
          )
        end

        it "works properly with deep nesting" do
          inner_metadata = nil

          RSpec.describe "Level 1" do
            describe "Level 2" do
              describe "Level 3" do
                inner_metadata = example("Level 4").metadata
              end
            end
          end

          expect(inner_metadata[:description]).to eq("Level 4")
          expect(inner_metadata[:example_group][:description]).to eq("Level 3")
          expect(inner_metadata[:example_group][:example_group][:description]).to eq("Level 2")
          expect(inner_metadata[:example_group][:example_group][:example_group][:description]).to eq("Level 1")
          expect(inner_metadata[:example_group][:example_group][:example_group][:example_group]).to be_nil
        end

        it "works properly with shallow nesting" do
          inner_metadata = nil

          RSpec.describe "Level 1" do
            inner_metadata = example("Level 2").metadata
          end

          expect(inner_metadata[:description]).to eq("Level 2")
          expect(inner_metadata[:example_group][:description]).to eq("Level 1")
          expect(inner_metadata[:example_group][:example_group]).to be_nil
        end

        it 'allows integration libraries like VCR to infer a fixture name from the example description by walking up nesting structure' do
          fixture_name_for = lambda do |metadata|
            description = metadata[:description]

            if example_group = metadata[:example_group]
              [fixture_name_for[example_group], description].join('/')
            else
              description
            end
          end

          ex = inferred_fixture_name = nil

          RSpec.configure do |config|
            config.before(:example, :infer_fixture) { |e| inferred_fixture_name = fixture_name_for[e.metadata] }
          end

          RSpec.describe "Group", :infer_fixture do
            ex = example("ex") { }
          end.run

          raise ex.execution_result.exception if ex.execution_result.exception

          expect(inferred_fixture_name).to eq("Group/ex")
        end

        it 'can mutate attributes when accessing them via [:example_group]' do
          meta = nil

          RSpec.describe(String) do
            describe "sub context" do
              meta = metadata
            end
          end

          expect {
            meta[:example_group][:described_class] = Hash
          }.to change { meta[:described_class] }.from(String).to(Hash)
        end

        it 'can still be filtered via a nested key under [:example_group] as before' do
          meta = nil

          line = __LINE__ + 1
          RSpec.describe("group") { meta = metadata }

          applies = MetadataFilter.any_apply?(
            { :example_group => { :line_number => line } },
            meta
          )

          expect(applies).to be true
        end
      end

      describe ":example_group_block" do
        it 'returns the block' do
          meta = nil

          RSpec.describe "group" do
            meta = metadata
          end

          expect(meta[:example_group_block]).to be_a(Proc).and eq(meta[:block])
        end

        it 'issues a deprecation warning' do
          expect_deprecation_with_call_site(__FILE__, __LINE__ + 2, /:example_group_block/)
          RSpec.describe "group" do
            metadata[:example_group_block]
          end
        end
      end

      describe ":describes" do
        context "on an example group metadata hash" do
          it 'returns the described_class' do
            meta = nil

            RSpec.describe Hash do
              meta = metadata
            end

            expect(meta[:describes]).to be(Hash).and eq(meta[:described_class])
          end

          it 'issues a deprecation warning' do
            expect_deprecation_with_call_site(__FILE__, __LINE__ + 2, /:describes/)
            RSpec.describe "group" do
              metadata[:describes]
            end
          end
        end

        context "on an example metadata hash" do
          it 'returns the described_class' do
            meta = nil

            RSpec.describe Hash do
              meta = example("ex").metadata
            end

            expect(meta[:describes]).to be(Hash).and eq(meta[:described_class])
          end

          it 'issues a deprecation warning' do
            expect_deprecation_with_call_site(__FILE__, __LINE__ + 2, /:describes/)
            RSpec.describe "group" do
              example("ex").metadata[:describes]
            end
          end
        end
      end
    end
  end
end
1
13,553
Would be nice to put this in a separate context named `"with a string and a non-string"` (since that's what it is -- it's definitely not a non-string and a string!).
rspec-rspec-core
rb
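As an illustration of the restructuring the review message suggests: the row's patch is not reproduced in this excerpt, so the exact example being moved is assumed here, but presumably it exercises the string-first argument order. A minimal sketch of the suggested separate context inside the ":description" group would be:

context "with a string and a non-string" do
  it "concats the args" do
    # string-first ordering; assumed expected value based on the space-joined
    # concatenation shown by the existing "Object group" example
    expect(group_value_for 'group', Object).to eq("group Object")
  end
end

This keeps the existing "with a non-string and a string" context for the `Object, 'group'` ordering, and gives the string-first ordering a context whose name actually matches its arguments.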
@@ -1265,7 +1265,7 @@ void ResStatisticsStatement::setStatistics(SRVR_STMT_HDL *pSrvrStmt, SQLSTATS_TYPE
 #define MAX_PERTABLE_STATS_DESC 30
 #define MAX_MASTERSTATS_ENTRY 31
-#define MAX_MEASSTATS_ENTRY 34
+#define MAX_MEASSTATS_ENTRY 26
 #define MAX_PERTABLE_ENTRY 14
 int i;
1
/**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
********************************************************************/
/* MODULE: ResStatisticsStatement.cpp
   PURPOSE: Implements the member functions of ResStatisticsStatement class
*/

//#define STATS_CLI
//#undef STATS_CLI

#include <platform_ndcs.h>
#include "ResStatisticsStatement.h"
#include <errno.h>
#include <sys/resource.h>
#include "QSGlobal.h"
#include "QSData.h"
#include "NskUtil.h"
#include <sstream>
#include <algorithm>
#include "commonFunctions.h"

using namespace std;

#define SEP " "
#define PREPARE 1
#define EXECUTE 2
#define FETCH 3
#define CLOSE 4
#define EXECDIRECT 5
#define STATS_ROWTYPE "statsRowType"
#define STATS_ROWTYPE_LEN 12

// void sendQueryStats(pub_struct_type pub_type, std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> pQuery_info);
extern ResStatisticsSession *resStatSession;

using namespace SRVR;

ResStatisticsStatement::ResStatisticsStatement(bool useCLI)
{
    useCLI_ = useCLI;
    flag = 0;
    memset(tmpString, '\0', MAX_ROWBUF_SIZE);
    tmpFlag = FALSE;
    catFlagOn = FALSE;
    SQLValue = NULL;

    // row 1 of RMS stats
    memset(queryId, '\0', MAX_QUERY_NAME_LEN + 1);
    compStartTime = 0;
    compEndTime = 0;
    compTime = 0;
    exeStartTime = 0;
    exeEndTime = 0;
    exeTime = 0;    // same as exeElapsedTime ?
    // rowsAffected = 0;
    sqlErrorCode = 0;
    statsErrorCode = 0;
    state = 0;
    statsType = 0;
    queryType = 0;
    estRowsAccessed = 0.0f;
    estRowsUsed = 0.0f;
    exeElapsedTime = 0;
    memset(parentQID, '\0', MAX_QUERY_NAME_LEN + 1);    // start of new col
    strcpy(parentQID, "NONE");
    memset(childQID, '0', sizeof(childQID));
    numSqlProcs = 0;
    numCpus = 0;
    memset(sqlSrc, '\0', RMS_STORE_SQL_SOURCE_LEN + 1);
    sqlSrcLen = 0;
    exePriority = 0;
    memset(transID, '\0', MAX_TXN_STR_LEN + 1);
    strcpy(transID, "<N/A>");
    rowsReturned = 0;
    NumRowsIUD = 0;
    firstRowReturnTime = 0;
    memset(subQryType, '\0', SUB_QRY_TYPE_LEN + 1);
    memset(parentSysName, '\0', PAR_SYS_NAME_LEN + 1);

    // row2 of RMS stats
    NumMessages = 0;
    MessagesBytes = 0;
    AccessedRows = 0;
    UsedRows = 0;
    DiskIOs = 0;
    Escalations = 0;
    LockWaits = 0;
    Opens = 0;
    OpenTime = 0;
    StatsBytes = 0;
    ProcessBusyTime = 0;
    DiskProcessBusyTime = 0;
    NewProcess = 0;
    NewProcessTime = 0;
    SpaceTotal = 0;
    SpaceUsed = 0;
    HeapTotal = 0;
    HeapUsed = 0;
    CpuTime = 0;
    Dp2SpaceTotal = 0;
    Dp2SpaceUsed = 0;
    Dp2HeapTotal = 0;
    Dp2HeapUsed = 0;
    UdrCpuTime = 0;
    reqMsgCnt = 0;
    reqMsgBytes = 0;
    replyMsgCnt = 0;
    replyMsgBytes = 0;
    TotalMemAlloc = 0;
    ScratchFileCount = 0;
    ScratchBufferBlockSize = 0;
    ScratchBufferBlocksRead = 0;
    ScratchBufferBlocksWritten = 0;
    ScratchOverflowMode = 0;
    ScratchBufferReadCount = 0;
    ScratchBufferWriteCount = 0;
    estimatedCost = 0;
    rtsExeCols = NULL;
    rtsDp2Cols = NULL;

    //
    // AGGREGATION----------------CONSTRUCTOR
    //

    //
    // rules
    //
    bzero(con_rule_name, sizeof(con_rule_name));
    bzero(cmp_rule_name, sizeof(cmp_rule_name));
    bzero(exe_rule_name, sizeof(exe_rule_name));

    pubStarted = false;
    queryFinished = false;
    wouldLikeToStart_ts = 0;
}

ResStatisticsStatement::~ResStatisticsStatement()
{
    RTS_Col *tmpRtsCol;

    while (rtsExeCols)
    {
        tmpRtsCol = rtsExeCols->next;
        delete rtsExeCols;
        rtsExeCols = NULL;
        rtsExeCols = tmpRtsCol;
    }
    while (rtsDp2Cols)
    {
        tmpRtsCol = rtsDp2Cols->next;
        delete rtsDp2Cols;
        rtsDp2Cols = NULL;
        rtsDp2Cols = tmpRtsCol;
    }
}

void ResStatisticsStatement::init()
{
    stmtType = 0;
    //sqlExecutionTime = 0;
    //sqlElapseTime = 0;
    odbcElapseTime = 0;
    odbcExecutionTime = 0;
    queryElapseTime = 0;
    queryExecutionTime = 0;
    estimatedCost = 0;
    maxRowCnt = 1000;
    maxRowLen = 1000;
    retcode = 0;
    //sqlExecutionTime = 0;
    //sqlElapseTime = 0;

    // row 1
    memset(queryId, '\0', MAX_QUERY_NAME_LEN + 1);
    compStartTime = 0;
    compEndTime = 0;
    compTime = 0;
    exeStartTime = 0;
    exeEndTime = 0;
    exeTime = 0;
    // rowsAffected = 0;
    sqlErrorCode = 0;
    statsErrorCode = 0;
    state = 0;
    statsType = 0;
    queryType = 0;
    estRowsAccessed = 0.0f;
    estRowsUsed = 0.0f;
    exeElapsedTime = 0;
    // New_Col
    memset(parentQID, '\0', sizeof(parentQID));
    strcpy(parentQID, "NONE");
    memset(childQID, '0', sizeof(childQID));
    numSqlProcs = 0;
    numCpus = 0;
    memset(sqlSrc, '\0', sizeof(sqlSrc));
    sqlSrcLen = 0;
    exePriority = 0;
    memset(transID, '\0', sizeof(transID));
    strcpy(transID, "<N/A>");
    rowsReturned = 0;
    firstRowReturnTime = 0;
    memset(subQryType, '\0', sizeof(subQryType));
    memset(parentSysName, '\0', sizeof(parentSysName));

    // row 2
    NumMessages = 0;
    MessagesBytes = 0;
    AccessedRows = 0;
    UsedRows = 0;
    DiskIOs = 0;
    Escalations = 0;
    LockWaits = 0;
    Opens = 0;
    OpenTime = 0;
    StatsBytes = 0;
    ProcessBusyTime = 0;
    DiskProcessBusyTime = 0;
    NewProcess = 0;
    NewProcessTime = 0;
    SpaceTotal = 0;
    SpaceUsed = 0;
    HeapTotal = 0;
    HeapUsed = 0;
    CpuTime = 0;
    Dp2SpaceTotal = 0;
    Dp2SpaceUsed = 0;
    Dp2HeapTotal = 0;
    Dp2HeapUsed = 0;
    UdrCpuTime = 0;
    // New_Col
    reqMsgCnt = 0;
    reqMsgBytes = 0;
    replyMsgCnt = 0;
    replyMsgBytes = 0;
    TotalMemAlloc = 0;
    estTotalMem = 0;
    //
    ScratchFileCount = 0;
    ScratchBufferBlockSize = 0;
    ScratchBufferBlocksRead = 0;
    ScratchBufferBlocksWritten = 0;
    ScratchOverflowMode = 0;
    ScratchBufferReadCount = 0;
    ScratchBufferWriteCount = 0;
}

//
string getSrvrSubstate(NDCS_SUBSTATE mx_substate)
{
    switch (mx_substate)
    {
    case NDCS_DLG_INIT:          return FMT_NDCS_DLG_INIT;
    case NDCS_CONN_IDLE:         return FMT_NDCS_CONN_IDLE;
    case NDCS_DLG_TERM:          return FMT_NDCS_DLG_TERM;
    case NDCS_DLG_BREAK:         return FMT_NDCS_DLG_BREAK;
    case NDCS_STOP_SRVR:         return FMT_NDCS_STOP_SRVR;
    case NDCS_RMS_ERROR:         return FMT_NDCS_RMS_ERROR;
    case NDCS_REPOS_IDLE:        return FMT_NDCS_REPOS_IDLE;
    case NDCS_REPOS_INTERVAL:    return FMT_NDCS_REPOS_INTERVAL;
    case NDCS_REPOS_PARTIAL:     return FMT_NDCS_REPOS_PARTIAL;
    case NDCS_EXEC_INTERVAL:     return FMT_NDCS_EXEC_INTERVAL;
    case NDCS_CONN_RULE_CHANGED: return FMT_NDCS_CONN_RULE_CHANGED;
    case NDCS_CLOSE:             return FMT_NDCS_CLOSE;
    case NDCS_PREPARE:           return FMT_NDCS_PREPARE;
    case NDCS_WMS_ERROR:         return FMT_NDCS_WMS_ERROR;
    case NDCS_QUERY_CANCELED:    return FMT_NDCS_QUERY_CANCELED;
    case NDCS_QUERY_REJECTED:    return FMT_NDCS_QUERY_REJECTED;
    case NDCS_INIT:
    default:
        return FMT_UNKNOWN;
    }
}

//
//===============================================
string getSQLStateStringRes(UInt32 state)
{
    switch (state)
    {
    case SQLSTMT_STATE_INITIAL:       return FMT_INITIAL;
    case SQLSTMT_STATE_OPEN:          return FMT_OPEN;
    case SQLSTMT_STATE_EOF:           return FMT_EOF;
    case SQLSTMT_STATE_CLOSE:         return FMT_CLOSE;
    case SQLSTMT_STATE_DEALLOCATED:   return FMT_DEALLOCATED;
    case SQLSTMT_STATE_FETCH:         return FMT_FETCH;
    case SQLSTMT_STATE_CLOSE_TABLES:  return FMT_CLOSE_TABLES;
    case SQLSTMT_STATE_PREPARE:       return FMT_PREPARE;
    case SQLSTMT_STATE_PROCESS_ENDED: return FMT_PROCESS_ENDED;
    default:                          return FMT_UNKNOWN;
    }
}

string getQueryStateStringRes(unsigned short state)
{
    switch (state)
    {
    case QUERY_INIT:
        return FMT_INIT;
        break;
    case QUERY_WAITING:
    case QUERY_WAITING_MAX_CPU_BUSY:
    case QUERY_WAITING_MAX_MEM_USAGE:
    case QUERY_WAITING_RELEASED_BY_ADMIN:
    case QUERY_WAITING_MAX_SERVICE_EXEC_QUERIES:
    case QUERY_WAITING_MAX_INSTANCE_EXEC_QUERIES:
    case QUERY_WAITING_TXN_BACKOUT:
    case QUERY_WAITING_MAX_ESPS:
    case QUERY_WAITING_CANARY_EXEC:
    case QUERY_WAITING_EST_MAX_CPU_BUSY:
        return FMT_WAITING;
        break;
    case QUERY_EXECUTING:
    case QUERY_EXECUTING_RELEASED_BY_ADMIN:
    case QUERY_EXECUTING_RELEASED_BY_RULE:
    case QUERY_EXECUTING_CANCEL_IN_PROGRESS:
    case QUERY_EXECUTING_CANCEL_FAILED:
    case QUERY_EXECUTING_CANCEL_FAILED_8026:
    case QUERY_EXECUTING_CANCEL_FAILED_8027:
    case QUERY_EXECUTING_CANCEL_FAILED_8028:
    case QUERY_EXECUTING_CANCEL_FAILED_8029:
    case QUERY_EXECUTING_CANCEL_FAILED_8031:
        return FMT_EXECUTING;
        break;
    case QUERY_HOLDING:
    case QUERY_HOLDING_LOAD:
    case QUERY_HOLDING_REPREPARING:
    case QUERY_HOLDING_EXECUTING_SQL_CMD:
    case QUERY_HOLDING_BY_RULE:
    case QUERY_HOLDING_BY_ADMIN:
        return FMT_HOLDING;
        break;
    case QUERY_COMPLETED:
    case QUERY_COMPLETED_HOLD_TIMEOUT:
    case QUERY_COMPLETED_EXEC_TIMEOUT:
    case QUERY_COMPLETED_BY_ADMIN:
    case QUERY_COMPLETED_BY_ADMIN_SERVER:
    case QUERY_COMPLETED_QUERY_NOT_FOUND:
    case QUERY_COMPLETED_CONNECTION_FAILED:
    case QUERY_COMPLETED_NDCS_PROCESS_FAILED:
    case QUERY_COMPLETED_CPU_FAILED:
    case QUERY_COMPLETED_SEGMENT_FAILED:
    case QUERY_COMPLETED_BY_RULE:
    case QUERY_COMPLETED_SERVICE_NOT_ACTIVE:
    case QUERY_COMPLETED_HARDWARE_FAILURE:
    case QUERY_COMPLETED_UNEXPECTED_STATE:
    case QUERY_COMPLETED_CLIENT_DISAPPEARED:
    case QUERY_COMPLETED_BY_CLIENT:
    //
    case QUERY_COMPLETED_NDCS_DLG_INIT:
    case QUERY_COMPLETED_NDCS_CONN_IDLE:
    case QUERY_COMPLETED_NDCS_DLG_TERM:
    case QUERY_COMPLETED_NDCS_DLG_BREAK:
    case QUERY_COMPLETED_NDCS_STOP_SRVR:
    case QUERY_COMPLETED_NDCS_RMS_ERROR:
    case QUERY_COMPLETED_NDCS_REPOS_IDLE:
    case QUERY_COMPLETED_NDCS_REPOS_INTERVAL:
    case QUERY_COMPLETED_NDCS_REPOS_PARTIAL:
    case QUERY_COMPLETED_NDCS_EXEC_INTERVAL:
    case QUERY_COMPLETED_NDCS_CONN_RULE_CHANGED:
    case QUERY_COMPLETED_NDCS_CLOSE:
    case QUERY_COMPLETED_NDCS_PREPARE:
    case QUERY_COMPLETED_NDCS_WMS_ERROR:
        return FMT_COMPLETED;
        break;
    case QUERY_REJECTED:
    case QUERY_REJECTED_BY_ADMIN:
    case QUERY_REJECTED_CONNECTION_FAILED:
    case QUERY_REJECTED_NDCS_PROCESS_FAILED:
    case QUERY_REJECTED_CPU_FAILED:
    case QUERY_REJECTED_SEGMENT_FAILED:
    case QUERY_REJECTED_QMSGCANCELLED:
    case QUERY_REJECTED_VERSION_MISMATCH:
    case QUERY_REJECTED_WMSONHOLD:
    case QUERY_REJECTED_MAX_QUERIES_REACHED:
    case QUERY_REJECTED_SERVICE_NOT_FOUND:
    case QUERY_REJECTED_SERVICE_ON_HOLD:
    case QUERY_REJECTED_BY_RULE:
    case QUERY_REJECTED_UNKNOWNUSER:
    case QUERY_REJECTED_UNEXPECTED_STATE:
    case QUERY_REJECTED_HOLD_TIMEOUT:
    case QUERY_REJECTED_WAIT_TIMEOUT:
    case QUERY_REJECTED_CLIENT_DISAPPEARED:
    case QUERY_REJECTED_LONG_TRANS_ABORTING:
        return FMT_REJECTED;
        break;
    case QUERY_SUSPENDED:
    case QUERY_SUSPENDED_BY_ADMIN:
    case QUERY_SUSPENDED_BY_RULE:
    case QUERY_SUSPENDED_CANCELED:
    case QUERY_SUSPENDED_CANCELED_BY_ADMIN:
    case QUERY_SUSPENDED_CANCELED_BY_RULE:
    case QUERY_SUSPENDED_CANCELED_BY_TIMEOUT:
        return FMT_SUSPENDED;
        break;
    default:
        return FMT_UNKNOWN;
        break;
    }
}

string getQuerySubStateStringRes(unsigned short state)
{
    switch (state)
    {
    case QUERY_WAITING_MAX_CPU_BUSY:
        return FMT_MAX_CPU_BUSY;
    case QUERY_WAITING_EST_MAX_CPU_BUSY:
        return FMT_EST_MAX_CPU_BUSY;
    //
    case QUERY_WAITING_MAX_MEM_USAGE:
        return FMT_MAX_MEM_USAGE;
    //
    case QUERY_WAITING_RELEASED_BY_ADMIN:
    case QUERY_EXECUTING_RELEASED_BY_ADMIN:
        return FMT_RELEASED_BY_ADMIN;
    //
    case QUERY_EXECUTING_RELEASED_BY_RULE:
        return FMT_RELEASED_BY_EXEC_RULE;
    //
    case QUERY_EXECUTING_CANCEL_IN_PROGRESS:
        return FMT_CANCEL_IN_PROGRESS;
    //
    case QUERY_WAITING_MAX_SERVICE_EXEC_QUERIES:
        return FMT_MAX_SERVICE_EXEC_QUERIES;
    case QUERY_WAITING_MAX_INSTANCE_EXEC_QUERIES:
        return FMT_MAX_INSTANCE_EXEC_QUERIES;
    //
    case QUERY_WAITING_TXN_BACKOUT:
        return FMT_WAITING_TXN_BACKOUT;
    //
    case QUERY_WAITING_MAX_ESPS:
        return FMT_MAX_AVG_ESPS;
    case QUERY_WAITING_CANARY_EXEC:
        return FMT_WAITING_CANARY;
    //
    case QUERY_HOLDING_LOAD:
        return FMT_LOADING;
    //
    case QUERY_HOLDING_REPREPARING:
        return FMT_REPREPARING;
    //
    case QUERY_HOLDING_EXECUTING_SQL_CMD:
        return FMT_EXECUTING_SQL_CMD;
    //
    case QUERY_REJECTED_BY_RULE:
    case QUERY_HOLDING_BY_RULE:
        return FMT_BY_COMP_RULE;
    //
    case QUERY_SUSPENDED_BY_RULE:
        return FMT_BY_EXEC_RULE;
    //
    case QUERY_SUSPENDED_BY_ADMIN:
    case QUERY_REJECTED_BY_ADMIN:
    case QUERY_HOLDING_BY_ADMIN:
        return FMT_BY_ADMIN;
    //
    case QUERY_REJECTED_HOLD_TIMEOUT:
    case QUERY_COMPLETED_HOLD_TIMEOUT:
        return FMT_HOLD_TIMEOUT;
    //
    case QUERY_COMPLETED_EXEC_TIMEOUT:
        return FMT_EXEC_TIMEOUT;
    //
    case QUERY_COMPLETED_BY_ADMIN:
        return FMT_CANCELLED_BY_ADMIN;
    case QUERY_COMPLETED_BY_ADMIN_SERVER:
        return FMT_CANCELLED_BY_ADMIN_SERVER;
    case QUERY_COMPLETED_BY_CLIENT:
        return FMT_CANCELLED_BY_CLIENT;
    //
    case QUERY_COMPLETED_QUERY_NOT_FOUND:
        return FMT_QUERY_NOT_FOUND;
    //
    case QUERY_REJECTED_CONNECTION_FAILED:
    case QUERY_COMPLETED_CONNECTION_FAILED:
        return FMT_CONNECTION_FAILED;
    //
    case QUERY_REJECTED_NDCS_PROCESS_FAILED:
    case QUERY_COMPLETED_NDCS_PROCESS_FAILED:
        return FMT_NDCS_PROCESS_FAILED;
    //
    case QUERY_REJECTED_CPU_FAILED:
    case QUERY_COMPLETED_CPU_FAILED:
        return FMT_CPU_FAILED;
    //
    case QUERY_REJECTED_SEGMENT_FAILED:
    case QUERY_COMPLETED_SEGMENT_FAILED:
        return FMT_SEGMENT_FAILED;
    //
    case QUERY_COMPLETED_BY_RULE:
        return FMT_BY_EXEC_RULE;
    //
    case QUERY_COMPLETED_SERVICE_NOT_ACTIVE:
        return FMT_SERVICE_NOT_ACTIVE;
    //
    case QUERY_REJECTED_UNEXPECTED_STATE:
    case QUERY_COMPLETED_UNEXPECTED_STATE:
        return FMT_UNEXPECTED_STATE;
    //
    case QUERY_REJECTED_CLIENT_DISAPPEARED:
    case QUERY_COMPLETED_CLIENT_DISAPPEARED:
        return FMT_CLIENT_DISAPPEARED;
    //
    case QUERY_REJECTED_QMSGCANCELLED:
        return FMT_QUEUE_MSG_CANCELLED;
    //
    case QUERY_REJECTED_VERSION_MISMATCH:
        return FMT_VERSION_MISMATCH;
    //
    case QUERY_REJECTED_WMSONHOLD:
        return FMT_WMS_ON_HOLD;
    //
    case QUERY_REJECTED_MAX_QUERIES_REACHED:
        return FMT_MAX_QUERIES_REACHED;
    //
    case QUERY_REJECTED_SERVICE_NOT_FOUND:
        return FMT_SERVICE_NOT_FOUND;
    //
    case QUERY_REJECTED_SERVICE_ON_HOLD:
        return FMT_SERVICE_ON_HOLD;
    //
    case QUERY_REJECTED_UNKNOWNUSER:
        return FMT_UNKNOWN_USER;
    //
    case QUERY_REJECTED_WAIT_TIMEOUT:
        return FMT_WAIT_TIMEOUT;
    //
    case QUERY_SUSPENDED_CANCELED:
        return FMT_QUERY_CANCELED;
    case QUERY_SUSPENDED_CANCELED_BY_RULE:
        return FMT_QUERY_CANCELED_BY_RULE;
    case QUERY_SUSPENDED_CANCELED_BY_ADMIN:
        return FMT_QUERY_CANCELED_BY_ADMIN;
    case QUERY_SUSPENDED_CANCELED_BY_TIMEOUT:
        return FMT_QUERY_CANCELED_BY_TIMEOUT;
    //
    case QUERY_COMPLETED_NDCS_DLG_INIT:
        return FMT_NDCS_DLG_INIT;
    case QUERY_COMPLETED_NDCS_CONN_IDLE:
        return FMT_NDCS_CONN_IDLE;
    case QUERY_COMPLETED_NDCS_DLG_TERM:
        return FMT_NDCS_DLG_TERM;
    case QUERY_COMPLETED_NDCS_DLG_BREAK:
        return FMT_NDCS_DLG_BREAK;
    case QUERY_COMPLETED_NDCS_STOP_SRVR:
        return FMT_NDCS_STOP_SRVR;
    case QUERY_COMPLETED_NDCS_RMS_ERROR:
        return FMT_NDCS_RMS_ERROR;
    case QUERY_COMPLETED_NDCS_REPOS_IDLE:
        return FMT_NDCS_REPOS_IDLE;
    case QUERY_COMPLETED_NDCS_REPOS_INTERVAL:
        return FMT_NDCS_REPOS_INTERVAL;
    case QUERY_COMPLETED_NDCS_REPOS_PARTIAL:
        return FMT_NDCS_REPOS_PARTIAL;
    case QUERY_COMPLETED_NDCS_EXEC_INTERVAL:
        return FMT_NDCS_EXEC_INTERVAL;
    case QUERY_COMPLETED_NDCS_CONN_RULE_CHANGED:
        return FMT_NDCS_CONN_RULE_CHANGED;
    case QUERY_COMPLETED_NDCS_CLOSE:
        return FMT_NDCS_CLOSE;
    case QUERY_COMPLETED_NDCS_PREPARE:
        return FMT_NDCS_PREPARE;
    case QUERY_COMPLETED_NDCS_WMS_ERROR:
        return FMT_NDCS_WMS_ERROR;
    case QUERY_REJECTED_LONG_TRANS_ABORTING:
        return FMT_LONG_TRANS_ABORTING;
    default:
        return FMT_NA;
    }
}

string get_WarnLevelStringRes(unsigned short value)
{
    if (WLVL_HIGH & value)
        return FMT_WLVL_HIGH;
    else if (WLVL_MEDIUM & value)
        return FMT_WLVL_MEDIUM;
    else if (WLVL_LOW & value)
        return FMT_WLVL_LOW;
    else if (WLVL_NO_WARN & value)
        return FMT_WLVL_NO_WARN;
    else
        return FMT_NONE;
}

void ResStatisticsStatement::start(Int32 inState,
                                   Int32 inSqlQueryType,
                                   const char *inStmtName,
                                   SRVR_STMT_HDL *pSrvrStmt,
                                   double inEstimatedCost,
                                   bool *flag_21036,
                                   /* Int32 *returnCode,
                                      Int32 *sqlWarningOrErrorLength,
                                      BYTE * &sqlWarningOrError, */
                                   char *inSqlStatement)
{
    char *inQueryId = NULL;
    SQL_QUERY_COST_INFO cost_info;
    SQL_QUERY_COMPILER_STATS_INFO comp_stats_info;
    SQL_COMPILATION_STATS_DATA comp_stats_data;
    unsigned short queryState;
    unsigned short warnLevel;
    int64 holdTime = 0, waitTime = 0, WMSstartTS = 0;
    Int32 inSqlNewQueryType = inSqlQueryType;

    if (inState == STMTSTAT_PREPARE || inState == STMTSTAT_EXECDIRECT)
    {
        statementId[0] = '\0';
        totalStatementOdbcElapseTime = 0;
        totalStatementOdbcExecutionTime = 0;
        totalStatementExecutes = 0;
        queryElapseTime = 0;
        queryExecutionTime = 0;
    }

    if (inStmtName == NULL)
        strcpy(statementId, "<NULL>");
    else if (inStmtName[0] == 0)
        strcpy(statementId, "<EMPTY>");
    else
        strcpy(statementId, inStmtName);

    // perf
    if (pSrvrStmt)
        init();

    if (catFlagOn == true)
        return;

    statementStartTime = JULIANTIMESTAMP();
    if (inState == STMTSTAT_PREPARE)
        prepareStartTime = statementStartTime;

    // get cpu time ( Process_getInfo)
    statementStartCpuTime = getCpuTime();

    if (inState == STMTSTAT_EXECUTE || inState == STMTSTAT_PREPARE)
    {
        queryStartTime = statementStartTime;
        queryStartCpuTime = statementStartCpuTime;
    }

    if (pSrvrStmt != NULL)
    {
        inQueryId = pSrvrStmt->sqlUniqueQueryID;
        cost_info = pSrvrStmt->cost_info;
        comp_stats_info = pSrvrStmt->comp_stats_info;
        comp_stats_data = comp_stats_info.compilationStats;
        queryState = pSrvrStmt->m_state;
        warnLevel = pSrvrStmt->m_warnLevel;
        holdTime = pSrvrStmt->m_hold_time;
        waitTime = pSrvrStmt->m_wait_time;
        WMSstartTS = pSrvrStmt->m_WMSstart_ts;
        bzero(con_rule_name, sizeof(con_rule_name));
        bzero(cmp_rule_name, sizeof(cmp_rule_name));
        bzero(exe_rule_name, sizeof(exe_rule_name));
        memcpy(con_rule_name, pSrvrStmt->m_con_rule_name, sizeof(con_rule_name));
        memcpy(cmp_rule_name, pSrvrStmt->m_cmp_rule_name, sizeof(cmp_rule_name));
        memcpy(exe_rule_name, pSrvrStmt->m_exe_rule_name, sizeof(exe_rule_name));

        if (comp_stats_info.dop > 1)
            estTotalMem = cost_info.estimatedTotalMem * comp_stats_info.dop;
        else
            estTotalMem = cost_info.estimatedTotalMem;

        pSrvrStmt->queryStartTime = queryStartTime;
        pSrvrStmt->queryStartCpuTime = queryStartCpuTime;
        inSqlNewQueryType = pSrvrStmt->sqlNewQueryType;

        if (comp_stats_data.compilerId[0] == 0)
            strcpy(comp_stats_data.compilerId, "<N/A>");
        //if (comp_stats_data.compileInfoLen <= 0 || comp_stats_data.compileInfo[0] == 0)
        //    strcpy(comp_stats_data.compileInfo, "<N/A>");
    }

    sequenceNumber = 0;
    totalmsgNumber = 0;

    if ((srvrGlobal->resourceStatistics & STMTSTAT_SQL) &&
        (inState == STMTSTAT_PREPARE || inState == STMTSTAT_EXECDIRECT))
    {
        if (inSqlStatement != NULL)
        {
            markNewOperator, sqlStatement = new char[strlen(inSqlStatement) + 1];
            strcpy(sqlStatement, inSqlStatement);
        }
        else
        {
            markNewOperator, sqlStatement = new char[5];
            strcpy(sqlStatement, "NULL");
        }
        if (sqlStatement != NULL)
        {
#ifdef RES_STATS_EVENT
            int len = min((int)strlen(sqlStatement), 254);
            string sqlString(sqlStatement, len);
            stringstream ssEvent;
            ssEvent << "START:SQLStatement: StatementId: " << statementId
                    << ", SqlText: " << sqlString;
            SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                         srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                         "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
            delete sqlStatement;
            sqlStatement = NULL;
        }
        else
        {
            strcpy(tmpString, "Error in allocating memory for sqlStatement");
            SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 1, tmpString);
            return;
        }
    }

    sequenceNumber = 0;
    totalmsgNumber = 0;
    sprintf(sequenceNumberStr, "%d/%d", sequenceNumber, totalmsgNumber);

    if ((srvrGlobal->resourceStatistics & STMTSTAT_PREPARE) && (inState == STMTSTAT_PREPARE))
    {
        if (inSqlStatement != NULL)
        {
            markNewOperator, sqlStatement = new char[strlen(inSqlStatement) + 1];
            strcpy(sqlStatement, inSqlStatement);
        }
        else
        {
            markNewOperator, sqlStatement = new char[5];
            strcpy(sqlStatement, "NULL");
        }
        if (sqlStatement != NULL)
        {
#ifdef RES_STATS_EVENT
            stringstream ssEvent;
            ssEvent << "START:SQLPrepare: StatementId: " << statementId
                    << ", StatementType: " << getStatementType(inSqlNewQueryType);
            SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                         srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                         "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
            delete sqlStatement;
            sqlStatement = NULL;
        }
        else
        {
            strcpy(tmpString, "Error in allocating memory for sqlStatement");
            SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 1, tmpString);
            return;
        }
    }

    if (inQueryId == NULL)
        strcpy(queryId, "<NULL>");
    else if (inQueryId[0] == 0)
        strcpy(queryId, "<EMPTY>");
    else
        strcpy(queryId, inQueryId);

    // Always enable 21036
    // if ((srvrGlobal->resourceStatistics & STMTSTAT_EXECUTE) && (inState == STMTSTAT_EXECUTE))
    if (inState == STMTSTAT_EXECUTE)
    {
        estimatedCost = inEstimatedCost;
        if (inSqlStatement != NULL)
        {
            markNewOperator, sqlStatement = new char[strlen(inSqlStatement) + 1];
            strcpy(sqlStatement, inSqlStatement);
        }
        else
        {
            markNewOperator, sqlStatement = new char[5];
            strcpy(sqlStatement, "NULL");
        }
        if (sqlStatement != NULL)
        {
            if (pSrvrStmt->bLowCost == false)
            {
                if (queryState == QUERY_EXECUTING)
                    queryState = QUERY_INIT;
                *flag_21036 = true;
                stringstream ss2;
                ss2 << "File: " << __FILE__ << ", Function: " << __FUNCTION__
                    << ", Line: " << __LINE__ << ", QID: " << queryId;
                /*
                SetQueryStatsInfoSqlText(
                    (const char *)ss2.str().c_str()    //"ResStatisticsStatement::start():"
                    , (const char *)queryId
                    , queryStartTime
                    , (const char *)inSqlStatement
                );
                */
            }
            delete sqlStatement;
            sqlStatement = NULL;
        }
        else
        {
            strcpy(tmpString, "Error in allocating memory for sqlStatement");
            SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 1, tmpString);
            return;
        }
        if (srvrGlobal->resourceStatistics & STMTSTAT_EXECUTE)
        {
#ifdef RES_STATS_EVENT
            stringstream ssEvent;
            ssEvent << "START:SQLExecute: StatementId: " << statementId
                    << ", QueryId: " << queryId;
            SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                         srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                         "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
        }
    }

    if ((srvrGlobal->resourceStatistics & STMTSTAT_FETCH) && (inState == STMTSTAT_FETCH))
    {
#ifdef RES_STATS_EVENT
        stringstream ssEvent;
        ssEvent << "START:SQLFetch: StatementId: " << statementId
                << ", QueryId: " << queryId;
        SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                     srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                     srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                     "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
    }
}

void ResStatisticsStatement::end(Int32 inState,
                                 Int32 inSqlQueryType,
                                 short inStmtType,
                                 char *inQueryId,
                                 double inEstimatedCost,
                                 char *inSqlStatement,
                                 Int32 inErrorStatement,
                                 Int32 inWarningStatement,
                                 int64 inRowCount,
                                 Int32 inErrorCode,
                                 ResStatisticsSession *resStatSession,
                                 Int32 inSqlErrorLength,
                                 char *inSqlError,
                                 SRVR_STMT_HDL *pSrvrStmt,
                                 bool *flag_21036,
                                 Int32 inSqlNewQueryType,
                                 char isClosed)
{
    if (catFlagOn == true)
        return;

    statementEndTime = JULIANTIMESTAMP();
    // get cpu time (Process_getInfo)
    statementEndCpuTime = getCpuTime();

    //Calculate Elapsed and Execution time
    odbcElapseTime = statementEndTime - statementStartTime;
    odbcExecutionTime = statementEndCpuTime - statementStartCpuTime;
    ps.odbcElapseTime = odbcElapseTime;
    ps.odbcExecutionTime = odbcExecutionTime;
    ps.state = inState;
    stmtType = inStmtType;
    ps.stmtType = inStmtType;
    ps.sqlNewQueryType = inSqlNewQueryType;
    estimatedCost = inEstimatedCost;

    if (inQueryId == NULL)
        strcpy(queryId, "<NULL>");
    else if (inQueryId[0] == 0)
        strcpy(queryId, "<EMPTY>");
    else
        strcpy(queryId, inQueryId);

    ps.errorStatement = inErrorStatement;
    ps.warningStatement = inWarningStatement;
    numberOfRows = inRowCount;
    totalStatementOdbcElapseTime = totalStatementOdbcElapseTime + odbcElapseTime;
    totalStatementOdbcExecutionTime = totalStatementOdbcExecutionTime + odbcExecutionTime;
    errorCode = inErrorCode;

    if (inState == STMTSTAT_EXECUTE || inState == STMTSTAT_EXECDIRECT)
        totalStatementExecutes++;
    else if (inState == STMTSTAT_PREPARE && pSrvrStmt != NULL)
    {
        // now get compile stats from RMS
        setStatistics(pSrvrStmt);
        if (statsErrorCode == 0 and sqlErrorCode == 0)
        {
            prepareStartTime = compStartTime;
            prepareEndTime = compEndTime;
            prepareTime = compTime;
        }
        else
        {
            prepareEndTime = statementEndTime;
            prepareTime = prepareEndTime - prepareStartTime;
        }
    }

    sequenceNumber = 0;
    totalmsgNumber = 0;
    sprintf(sequenceNumberStr, "%d/%d", sequenceNumber, totalmsgNumber);

    if ((srvrGlobal->resourceStatistics & STMTSTAT_PREPARE) && (inState == STMTSTAT_PREPARE))
    {
        if (inSqlStatement != NULL)
        {
            markNewOperator, sqlStatement = new char[strlen(inSqlStatement) + 1];
            strcpy(sqlStatement, inSqlStatement);
        }
        else
        {
            markNewOperator, sqlStatement = new char[5];
            strcpy(sqlStatement, "NULL");
        }
        if (sqlStatement != NULL)
        {
#ifdef RES_STATS_EVENT
            stringstream ssEvent;
            ssEvent << "END:SQLPrepare: StatementId: " << statementId
                    << ", QueryId: " << queryId
                    << ", EstimatedCost: " << estimatedCost
                    << ", StatementType: " << getStatementType(inSqlNewQueryType)
                    << ", SQLCompileTime: " << ps.odbcElapseTime
                    << ", ErrorCode: " << errorCode;
            SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                         srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                         "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
            delete sqlStatement;
            sqlStatement = NULL;
        }
        else
        {
            strcpy(tmpString, "Error in allocating memory for sqlStatement");
            SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 1, tmpString);
            return;
        }
    }

    // if statement is on
    sequenceNumber = 0;
    totalmsgNumber = 0;
    sprintf(sequenceNumberStr, "%d/%d", sequenceNumber, totalmsgNumber);

    if ((srvrGlobal->resourceStatistics & STMTSTAT_SQL) &&
        (inState == STMTSTAT_PREPARE || inState == STMTSTAT_EXECDIRECT))
    {
        if (inSqlStatement != NULL)
        {
            markNewOperator, sqlStatement = new char[strlen(inSqlStatement) + 1];
            strcpy(sqlStatement, inSqlStatement);
        }
        else
        {
            markNewOperator, sqlStatement = new char[5];
            strcpy(sqlStatement, "NULL");
        }
        if (sqlStatement != NULL)
        {
#ifdef RES_STATS_EVENT
            int len = min((int)strlen(sqlStatement), 254);
            string sqlString(sqlStatement, len);
            stringstream ssEvent;
            ssEvent << "END:SQLStatement: StatementId: " << statementId
                    << ", QueryId: " << queryId
                    << ", SqlText: " << sqlString;
            SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                         srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                         "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
            delete sqlStatement;
            sqlStatement = NULL;
        }
        else
        {
            strcpy(tmpString, "Error in allocating memory for sqlStatement");
            SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 1, tmpString);
            return;
        }
    }

    // Always enable 21036
    // if ((srvrGlobal->resourceStatistics & STMTSTAT_EXECUTE) && (inState == STMTSTAT_EXECUTE))
    if (inState == STMTSTAT_EXECUTE)
    {
        pSrvrStmt->queryEndTime = statementEndTime;
        pSrvrStmt->queryEndCpuTime = statementEndCpuTime;
        queryElapseTime = statementEndTime - pSrvrStmt->queryStartTime;
        queryExecutionTime = statementEndCpuTime - pSrvrStmt->queryStartCpuTime;

        if (stmtType == TYPE_INSERT || stmtType == TYPE_INSERT_PARAM ||
            stmtType == TYPE_DELETE || stmtType == TYPE_UPDATE ||
            inSqlQueryType == SQL_SELECT_UNIQUE)
        {
#ifdef RES_STATS_EVENT
            stringstream ssEvent;
            ssEvent << "END:SQLExecute: StatementId: " << statementId
                    << ", QueryId: " << queryId
                    << ", StatementType: " << getStatementType(inSqlNewQueryType)
                    << ", ClientId: " << resCollectinfo.clientId
                    << ", UserName: " << resCollectinfo.userName
                    << ", UserId: " << resCollectinfo.userId
                    << ", ApplicationId: " << resCollectinfo.applicationId
                    << ", NodeName: " << resCollectinfo.nodeName
                    << ", CpuPin: " << resCollectinfo.cpuPin
                    << ", DSName: " << resCollectinfo.DSName
                    << ", Time: " << statementEndTime
                    << ", ODBCElapsedTime: " << ps.odbcElapseTime
                    << ", ODBCExecutionTime: " << ps.odbcExecutionTime
                    << ", NumberOfRows: " << numberOfRows
                    << ", ErrorCode: " << errorCode
                    << ", RowsAccessed: " << AccessedRows
                    << ", RowsRetrieved: " << UsedRows
                    << ", DiscReads: " << DiskIOs
                    << ", MsgsToDisc: " << NumMessages
                    << ", MsgsBytesToDisc: " << MessagesBytes
                    << ", LockWaits: " << LockWaits
                    << ", LockEscalation: " << Escalations
                    << ", TotalExecutes: <N/A>";
            SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                         srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                         "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
        }
        else
        {
            if (srvrGlobal->resourceStatistics & STMTSTAT_EXECUTE)
            {
#ifdef RES_STATS_EVENT
                stringstream ssEvent;
                ssEvent << "END:SQLExecute: StatementId: " << statementId
                        << ", QueryId: " << queryId
                        << ", StatementType: " << getStatementType(inSqlNewQueryType)
                        << ", ClientId: " << resCollectinfo.clientId
                        << ", UserName: " << resCollectinfo.userName
                        << ", UserId: " << resCollectinfo.userId
                        << ", ApplicationId: " << resCollectinfo.applicationId
                        << ", NodeName: " << resCollectinfo.nodeName
                        << ", CpuPin: " << resCollectinfo.cpuPin
                        << ", DSName: " << resCollectinfo.DSName
                        << ", Time: " << statementEndTime
                        << ", ODBCElapsedTime: " << ps.odbcElapseTime
                        << ", ODBCExecutionTime: " << ps.odbcExecutionTime
                        << ", NumberOfRows:<N/A>"
                        << ", ErrorCode: " << errorCode
                        << ", RowsAccessed:<N/A>"
                        << ", RowsRetrieved:<N/A>"
                        << ", DiscReads:<N/A>"
                        << ", MsgsToDisc:<N/A>"
                        << ", MsgsBytesToDisc:<N/A>"
                        << ", LockWaits:<N/A>"
                        << ", LockEscalation:<N/A>"
                        << ", TotalExecutes:<N/A>";
                SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                             srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                             srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                             "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
            }
        }
        if (srvrGlobal->resourceStatistics & STMTSTAT_EXECUTE)
        {
        }
        // delay generating 21036 message until qrysrvc_ExecuteFinished is called
    }

    // Always enable 21036
    // if ((srvrGlobal->resourceStatistics & STMTSTAT_FETCH) && (inState == STMTSTAT_FETCH || inState == STMTSTAT_CLOSE))
    if (inState == STMTSTAT_FETCH || inState == STMTSTAT_CLOSE)
    {
        pSrvrStmt->queryEndTime = statementEndTime;
        pSrvrStmt->queryEndCpuTime = statementEndCpuTime;
        queryElapseTime = statementEndTime - pSrvrStmt->queryStartTime;
        queryExecutionTime = statementEndCpuTime - pSrvrStmt->queryStartCpuTime;
        statStatisticsFlag = FALSE;

        if (srvrGlobal->resourceStatistics & STMTSTAT_FETCH)
        {
#ifdef RES_STATS_EVENT
            stringstream ssEvent;
            ssEvent << "END:SQLFetch: StatementId: " << statementId
                    << ", StatementType: " << getStatementType(inSqlNewQueryType)
                    << ", ClientId: " << resCollectinfo.clientId
                    << ", UserName: " << resCollectinfo.userName
                    << ", UserId: " << resCollectinfo.userId
                    << ", ApplicationId: " << resCollectinfo.applicationId
                    << ", NodeName: " << resCollectinfo.nodeName
                    << ", CpuPin: " << resCollectinfo.cpuPin
                    << ", DSName: " << resCollectinfo.DSName
                    << ", Time: " << statementEndTime
                    << ", ODBCElapsedTime: " << totalStatementOdbcElapseTime
                    << ", ODBCExecutionTime: " << totalStatementOdbcElapseTime
                    << ", NumberOfRows: <N/A>"
                    << ", ErrorCode: " << errorCode
                    << ", RowsAccessed: " << AccessedRows
                    << ", RowsRetrieved: " << UsedRows
                    << ", DiscReads: " << DiskIOs
                    << ", MsgsToDisc: " << NumMessages
                    << ", MsgsBytesToDisc: " << MessagesBytes
                    << ", LockWaits: " << LockWaits
                    << ", LockEscalation: " << Escalations
                    << ", TotalExecutes: " << totalStatementExecutes;
            SendEventMsg(MSG_RES_STAT_INFO, EVENTLOG_INFORMATION_TYPE,
                         srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
                         srvrGlobal->srvrObjRef, 4, srvrGlobal->sessionId,
                         "STATISTICS INFORMATION", "0", ssEvent.str().c_str());
#endif
        }
        // delay writing of 21036 msg to dashboard
    }

    // if ((srvrGlobal->resourceStatistics & STMTSTAT_SQL) || (srvrGlobal->resourceStatistics & STMTSTAT_PREPARE) || (srvrGlobal->resourceStatistics & STMTSTAT_EXECUTE) || (srvrGlobal->resourceStatistics & STMTSTAT_EXECDIRECT) || (srvrGlobal->resourceStatistics & STMTSTAT_FETCH))
    // Check for lower bits to enable session
    // to always enable 21036
    if (resStatSession != NULL)
        resStatSession->accumulateStatistics(&ps);
}

static void initSqlStatsItems(SQLSTATS_ITEM *sqlStatsItem, short noOfStatsItem, SQLSTATS_DESC* sqlStatsDesc_)
{
    for (int i = 0; i < noOfStatsItem; i++)
    {
        sqlStatsItem[i].stats_type = sqlStatsDesc_->stats_type;
        sqlStatsItem[i].tdb_id = sqlStatsDesc_->tdb_id;
        sqlStatsItem[i].str_value = NULL;
    }
}

void ResStatisticsStatement::init_rms_counters(bool resetAll)
{
    bzero(parentQID, sizeof(parentQID));
    bzero(childQID, sizeof(childQID));
    bzero(sqlSrc, sizeof(sqlSrc));
    bzero(subQryType, sizeof(subQryType));
    bzero(parentSysName, sizeof(parentSysName));
    sqlSrcLen = 0;

    // row 2 col value resets
    NumMessages = 0;
    MessagesBytes = 0;
    AccessedRows = 0;
    UsedRows = 0;
    DiskIOs = 0;
    Escalations = 0;
    LockWaits = 0;
    Opens = 0;
    OpenTime = 0;
    StatsBytes = 0;
    ProcessBusyTime = 0;
    DiskProcessBusyTime = 0;
    NewProcess = 0;
    NewProcessTime = 0;
    SpaceTotal = 0;
    SpaceUsed = 0;
    HeapTotal = 0;
    HeapUsed = 0;
    CpuTime = 0;
    Dp2SpaceTotal = 0;
    Dp2SpaceUsed = 0;
    Dp2HeapTotal = 0;
    Dp2HeapUsed = 0;
    reqMsgCnt = 0;
    reqMsgBytes = 0;
    replyMsgCnt = 0;
    replyMsgBytes = 0;
    TotalMemAlloc = 0;
    MaxMemUsed = 0;

    // row 1 col value reset
    compStartTime = 0;
    compEndTime = 0;
    compTime = 0;
    exeStartTime = 0;
    exeEndTime = 0;
    exeTime = 0;
    state = 0;
    if (resetAll)
    {
        statsErrorCode = 0;
        rowsAffected = 0;
    }
    sqlErrorCode = 0;
    estRowsAccessed = 0.0f;
    estRowsUsed = 0.0f;
    exeElapsedTime = 0;
    numSqlProcs = 0;
    numCpus = 0;
    sqlSrcLen = 0;
    exePriority = 0;
    rowsReturned = 0;
    NumRowsIUD = 0;
    firstRowReturnTime = 0;
    Timeouts = 0;
    NumSorts = 0;
    SortElapsedTime = 0;
    StatsRowType = 0;
    AQRlastError = 0;
    AQRnumRetries = 0;
    AQRdelayBeforeRetry = 0;
    UdrCpuTime = 0;
    ScratchFileCount = 0;
    ScratchBufferBlockSize = 0;
    ScratchBufferBlocksRead = 0;
    ScratchBufferBlocksWritten = 0;
    ScratchOverflowMode = 0;
    ScratchBufferReadCount = 0;
    ScratchBufferWriteCount = 0;
}

void ResStatisticsStatement::setStatistics(SRVR_STMT_HDL *pSrvrStmt, SQLSTATS_TYPE statsType,
                                           char *qID, short qIdLen, int activeQueryNum) //20111208
{
#define MAX_ACCUMULATED_STATS_DESC 2
#define MAX_PERTABLE_STATS_DESC 30
#define MAX_MASTERSTATS_ENTRY 31
#define MAX_MEASSTATS_ENTRY 34
#define MAX_PERTABLE_ENTRY 14

    int i;
    Int32 cliRC;
    SQLSTATS_DESC* sqlStatsDesc_ = NULL;
    Int32 maxStatsDescEntries_;
    short statsCollectType_;
    Int32 retStatsDescEntries_;
    short currStatsDescEntry_ = 0;
    short currPerTblEntry_ = 0;
    short reqType;
    short mergeType;
    SQLSTATS_ITEM* masterStatsItems_ = NULL;
    SQLSTATS_ITEM* measStatsItems_ = NULL;
    SQLSTATS_ITEM* pertableStatsItems_ = NULL;
    char* qrid_ = NULL;
    char* parentid_ = NULL;
    char* childid_ = NULL;
    char* rmsSqlSource_ = NULL;
    char* tblName_ = NULL;
    char* subQueryType_ = NULL;
    char* parentSystem_ = NULL;

    // To pass either query ID or statement name to CLI
    // Default is CURRENT if pSrvrStmt is NULL.
    //
    char *reqStr = NULL;
    short reqStrLen = 0;
    char *reqStrCurrent = "CURRENT";

    if (!srvrGlobal->m_bStatisticsEnabled)
        return;

    if (pSrvrStmt != NULL)
        pSrvrStmt->m_mxsrvr_substate = NDCS_INIT;

    // Fix for bugzilla #2388
    // For queries having STATS_COLLECTION_TYPE = NO_STATS we should not be calling
    // SQL_EXEC_GetStatistics2() since no stats are collected for these queries. Moreover, for
    // some (NO_STATS) queries like 'get statistics for <qid>' etc. calling SQL_EXEC_GetStatistics2() with
    // reqType=SQLCLI_STATS_REQ_QID_CURRENT will return stats collected for a previously executed query.
    if (statsType == SQLCLI_ACCUMULATED_STATS && pSrvrStmt != NULL)
    {
        // Populate the short query text
        if (pSrvrStmt->comp_stats_info.statsCollectionType == SQLCLI_NO_STATS &&
            pSrvrStmt->comp_stats_info.compilationStats.compilerId[0] != 0)
        {
            if (pSrvrStmt->sqlString != NULL && pSrvrStmt->m_bNewQueryId == true)
            {
                int len = pSrvrStmt->sqlStringLen > RMS_STORE_SQL_SOURCE_LEN ?
                          RMS_STORE_SQL_SOURCE_LEN : strlen(pSrvrStmt->sqlString);
                bzero(pSrvrStmt->m_shortQueryText, sizeof(pSrvrStmt->m_shortQueryText));
                pSrvrStmt->m_rmsSqlSourceLen = pSrvrStmt->sqlStringLen;
                translateToUTF8(srvrGlobal->isoMapping, pSrvrStmt->sqlString, len,
                                pSrvrStmt->m_shortQueryText, RMS_STORE_SQL_SOURCE_LEN);
                pSrvrStmt->m_bNewQueryId = false;
            }
            init_rms_counters();
            return;
        }
    }

    if (qIdLen != 0)
    {
        reqStr = qID;
        reqStrLen = qIdLen;
    }

    switch (statsType)
    {
    case SQLCLI_ACCUMULATED_STATS:
        maxStatsDescEntries_ = MAX_ACCUMULATED_STATS_DESC;
        mergeType = SQLCLI_ACCUMULATED_STATS;
        if (qIdLen != 0)
            reqType = SQLCLI_STATS_REQ_QID;
        else
        {
            if (pSrvrStmt != NULL && pSrvrStmt->stmtName != NULL && pSrvrStmt->stmtNameLen > 0)
            {
                reqType = SQLCLI_STATS_REQ_STMT;
                reqStr = pSrvrStmt->stmtName;
                reqStrLen = pSrvrStmt->stmtNameLen;
            }
            else
            {
                reqType = SQLCLI_STATS_REQ_QID_CURRENT;
                reqStr = reqStrCurrent;
                reqStrLen = strlen(reqStrCurrent);
            }
        }
        break;
    case SQLCLI_PERTABLE_STATS:
        maxStatsDescEntries_ = MAX_PERTABLE_STATS_DESC;
        reqType = SQLCLI_STATS_REQ_QID;
        mergeType = SQLCLI_PERTABLE_STATS;
        if (!qID)
            ; // error
        break;
    }

    try
    {
        bzero(perTableStats, sizeof(perTableStats));
        perTableRowSize = 0;

        sqlStatsDesc_ = new (nothrow) SQLSTATS_DESC[maxStatsDescEntries_];
        if (sqlStatsDesc_ == NULL) { cliRC = 990; throw("error"); }

        cliRC = SQL_EXEC_GetStatistics2(reqType, reqStr, reqStrLen, activeQueryNum, mergeType,
                                        &statsCollectType_, sqlStatsDesc_, maxStatsDescEntries_,
                                        &retStatsDescEntries_);
        if (cliRC != 0) throw("error");
        if (retStatsDescEntries_ <= 0) { cliRC = 991; throw("error"); }

        while (currStatsDescEntry_ < retStatsDescEntries_)
        {
            switch (sqlStatsDesc_[currStatsDescEntry_].stats_type)
            {
            case SQLSTATS_DESC_MASTER_STATS:
                masterStatsItems_ = new (nothrow) SQLSTATS_ITEM[MAX_MASTERSTATS_ENTRY];
                if (masterStatsItems_ == NULL) { cliRC = 992; throw("error"); }
                bzero(masterStatsItems_, sizeof(SQLSTATS_ITEM) * MAX_MASTERSTATS_ENTRY);
                initSqlStatsItems(masterStatsItems_, MAX_MASTERSTATS_ENTRY, &sqlStatsDesc_[currStatsDescEntry_]);
                masterStatsItems_[0].statsItem_id = SQLSTATS_QUERY_ID;
                masterStatsItems_[1].statsItem_id = SQLSTATS_COMP_START_TIME;
                masterStatsItems_[2].statsItem_id = SQLSTATS_COMP_END_TIME;
                masterStatsItems_[3].statsItem_id = SQLSTATS_COMP_TIME;
                masterStatsItems_[4].statsItem_id = SQLSTATS_EXECUTE_START_TIME;
                masterStatsItems_[5].statsItem_id = SQLSTATS_FIRST_ROW_RET_TIME;
                masterStatsItems_[6].statsItem_id = SQLSTATS_EXECUTE_END_TIME;
                masterStatsItems_[7].statsItem_id = SQLSTATS_EXECUTE_TIME;
                masterStatsItems_[8].statsItem_id = SQLSTATS_STMT_STATE;
                masterStatsItems_[9].statsItem_id = SQLSTATS_STATS_ERROR_CODE;
                masterStatsItems_[10].statsItem_id = SQLSTATS_SQL_ERROR_CODE;
                masterStatsItems_[11].statsItem_id = SQLSTATS_QUERY_TYPE;
                masterStatsItems_[12].statsItem_id = SQLSTATS_ROWS_RETURNED;
                masterStatsItems_[13].statsItem_id = SQLSTATS_EST_ROWS_ACCESSED;
                masterStatsItems_[14].statsItem_id = SQLSTATS_EST_ROWS_USED;
                masterStatsItems_[15].statsItem_id = SQLSTATS_FIXUP_TIME;
                masterStatsItems_[16].statsItem_id = SQLSTATS_PARENT_QUERY_ID;    // new col
                masterStatsItems_[17].statsItem_id = SQLSTATS_NUM_SQLPROCS;
                masterStatsItems_[18].statsItem_id = SQLSTATS_NUM_CPUS;
                masterStatsItems_[19].statsItem_id = SQLSTATS_SOURCE_STR;
                masterStatsItems_[20].statsItem_id = SQLSTATS_SOURCE_STR_LEN;
                masterStatsItems_[21].statsItem_id = SQLSTATS_MASTER_PRIORITY;
                masterStatsItems_[22].statsItem_id = SQLSTATS_TRANSID;
                masterStatsItems_[23].statsItem_id = SQLSTATS_AQR_LAST_ERROR;
                masterStatsItems_[24].statsItem_id = SQLSTATS_AQR_NUM_RETRIES;
                masterStatsItems_[25].statsItem_id = SQLSTATS_AQR_DELAY_BEFORE_RETRY;
                masterStatsItems_[26].statsItem_id = SQLSTATS_ROWS_AFFECTED;
                masterStatsItems_[27].statsItem_id = SQLSTATS_CHILD_QUERY_ID;    // new col
                masterStatsItems_[28].statsItem_id = SQLSTATS_RECLAIM_SPACE_COUNT;
                masterStatsItems_[29].statsItem_id = SQLSTATS_SUBQUERY_TYPE;
                masterStatsItems_[30].statsItem_id = SQLSTATS_PARENT_QUERY_SYSTEM;
                // MAX_MASTERSTATS_ENTRY is set to 31

                qrid_ = masterStatsItems_[0].str_value = new (nothrow) char[MAX_QUERY_ID_LEN + 1];
                if (qrid_ == NULL) { cliRC = 993; throw("error"); }
                bzero(qrid_, MAX_QUERY_ID_LEN + 1);
                masterStatsItems_[0].str_max_len = MAX_QUERY_ID_LEN;

                parentid_ = masterStatsItems_[16].str_value = new (nothrow) char[MAX_QUERY_ID_LEN + 1];
                if (parentid_ == NULL) { cliRC = 994; throw("error"); }
                bzero(parentid_, MAX_QUERY_ID_LEN + 1);
                masterStatsItems_[16].str_max_len = MAX_QUERY_ID_LEN;

                childid_ = masterStatsItems_[27].str_value = new (nothrow) char[MAX_QUERY_ID_LEN + 1];
                if (childid_ == NULL) { cliRC = 995; throw("error"); }
                bzero(childid_, MAX_QUERY_ID_LEN + 1);
                masterStatsItems_[27].str_max_len = MAX_QUERY_ID_LEN;

                rmsSqlSource_ = masterStatsItems_[19].str_value = new (nothrow) char[RMS_STORE_SQL_SOURCE_LEN + 2];
                if (rmsSqlSource_ == NULL) { cliRC = 996; throw("error"); }
                bzero(rmsSqlSource_, RMS_STORE_SQL_SOURCE_LEN + 2);
                masterStatsItems_[19].str_max_len = RMS_STORE_SQL_SOURCE_LEN;

                subQueryType_ = masterStatsItems_[29].str_value = new (nothrow) char[SUB_QRY_TYPE_LEN + 1];
                if (subQueryType_ == NULL) { cliRC = 1000; throw("error"); }
                bzero(subQueryType_, SUB_QRY_TYPE_LEN + 1);
                masterStatsItems_[29].str_max_len = SUB_QRY_TYPE_LEN;

                parentSystem_ = masterStatsItems_[30].str_value = new (nothrow) char[PAR_SYS_NAME_LEN + 1];
                if (parentSystem_ == NULL) { cliRC = 1001; throw("error"); }
                bzero(parentSystem_, PAR_SYS_NAME_LEN + 1);
                masterStatsItems_[30].str_max_len = PAR_SYS_NAME_LEN;

                cliRC = SQL_EXEC_GetStatisticsItems(reqType, reqStr, reqStrLen,
                                                    MAX_MASTERSTATS_ENTRY, masterStatsItems_);
                if (cliRC != 0) throw("error");
                break;
            case SQLSTATS_DESC_MEAS_STATS:
                measStatsItems_ = new (nothrow) SQLSTATS_ITEM[MAX_MEASSTATS_ENTRY];
                if (measStatsItems_ == NULL) { cliRC = 997; throw("error"); }
                bzero(measStatsItems_, sizeof(SQLSTATS_ITEM) * MAX_MEASSTATS_ENTRY);
                initSqlStatsItems(measStatsItems_, MAX_MEASSTATS_ENTRY, &sqlStatsDesc_[currStatsDescEntry_]);
                measStatsItems_[0].statsItem_id = SQLSTATS_ACT_ROWS_ACCESSED;
                measStatsItems_[1].statsItem_id = SQLSTATS_ACT_ROWS_USED;
                measStatsItems_[2].statsItem_id = SQLSTATS_MSG_COUNT;
                measStatsItems_[3].statsItem_id = SQLSTATS_MSG_BYTES;
                measStatsItems_[4].statsItem_id = SQLSTATS_STATS_BYTES;
                measStatsItems_[5].statsItem_id = SQLSTATS_DISK_IOS;
                measStatsItems_[6].statsItem_id = SQLSTATS_LOCK_WAITS;
                measStatsItems_[7].statsItem_id = SQLSTATS_LOCK_ESCALATIONS;
                measStatsItems_[8].statsItem_id = SQLSTATS_DP2_CPU_BUSY_TIME;
                measStatsItems_[9].statsItem_id = SQLSTATS_SQL_CPU_BUSY_TIME;
                measStatsItems_[10].statsItem_id = SQLSTATS_SQL_SPACE_ALLOC;
                measStatsItems_[11].statsItem_id = SQLSTATS_SQL_SPACE_USED;
                measStatsItems_[12].statsItem_id = SQLSTATS_SQL_HEAP_ALLOC;
                measStatsItems_[13].statsItem_id = SQLSTATS_SQL_HEAP_USED;
                measStatsItems_[14].statsItem_id = SQLSTATS_EID_SPACE_ALLOC;
                measStatsItems_[15].statsItem_id = SQLSTATS_EID_SPACE_USED;
                measStatsItems_[16].statsItem_id = SQLSTATS_EID_HEAP_ALLOC;
                measStatsItems_[17].statsItem_id = SQLSTATS_EID_HEAP_USED;
                measStatsItems_[18].statsItem_id = SQLSTATS_OPENS;
                measStatsItems_[19].statsItem_id = SQLSTATS_OPEN_TIME;
                measStatsItems_[20].statsItem_id = SQLSTATS_PROCESS_CREATED;
                measStatsItems_[21].statsItem_id = SQLSTATS_PROCESS_CREATE_TIME;
                measStatsItems_[22].statsItem_id = SQLSTATS_REQ_MSG_CNT;
                measStatsItems_[23].statsItem_id = SQLSTATS_REQ_MSG_BYTES;
                measStatsItems_[24].statsItem_id = SQLSTATS_REPLY_MSG_CNT;
                measStatsItems_[25].statsItem_id = SQLSTATS_REPLY_MSG_BYTES;
                measStatsItems_[26].statsItem_id = SQLSTATS_SCRATCH_FILE_COUNT;
                measStatsItems_[27].statsItem_id = SQLSTATS_SCRATCH_BUFFER_BLOCK_SIZE;
                measStatsItems_[28].statsItem_id = SQLSTATS_SCRATCH_BUFFER_BLOCKS_READ;
                measStatsItems_[29].statsItem_id = SQLSTATS_SCRATCH_BUFFER_BLOCKS_WRITTEN;
                measStatsItems_[30].statsItem_id = SQLSTATS_SCRATCH_OVERFLOW_MODE;
                measStatsItems_[31].statsItem_id = SQLSTATS_SCRATCH_READ_COUNT;
                measStatsItems_[32].statsItem_id = SQLSTATS_SCRATCH_WRITE_COUNT;
                measStatsItems_[33].statsItem_id = SQLSTATS_UDR_CPU_BUSY_TIME;
                // MAX_MEASSTATS_ENTRY is set to 34

                cliRC = SQL_EXEC_GetStatisticsItems(reqType, reqStr, reqStrLen,
                                                    MAX_MEASSTATS_ENTRY, measStatsItems_);
                if (cliRC != 0) throw("error");
                break;
            case SQLSTATS_DESC_ROOT_OPER_STATS:
                break;
            case SQLSTATS_DESC_PERTABLE_STATS:
                if (!pertableStatsItems_)
                {
                    pertableStatsItems_ = new (nothrow) SQLSTATS_ITEM[MAX_PERTABLE_ENTRY];
                    if (pertableStatsItems_ == NULL) { cliRC = 998; throw("error"); }
                }
                initSqlStatsItems(pertableStatsItems_, MAX_PERTABLE_ENTRY, &sqlStatsDesc_[currStatsDescEntry_]);
                pertableStatsItems_[0].statsItem_id = SQLSTATS_TABLE_ANSI_NAME;
                pertableStatsItems_[1].statsItem_id = SQLSTATS_EST_ROWS_ACCESSED;
                pertableStatsItems_[2].statsItem_id = SQLSTATS_EST_ROWS_USED;
                pertableStatsItems_[3].statsItem_id = SQLSTATS_ACT_ROWS_ACCESSED;
                pertableStatsItems_[4].statsItem_id = SQLSTATS_ACT_ROWS_USED;
                pertableStatsItems_[5].statsItem_id = SQLSTATS_DISK_IOS;
                pertableStatsItems_[6].statsItem_id = SQLSTATS_MSG_COUNT;
                pertableStatsItems_[7].statsItem_id = SQLSTATS_MSG_BYTES;
                pertableStatsItems_[8].statsItem_id = SQLSTATS_STATS_BYTES;
                pertableStatsItems_[9].statsItem_id = SQLSTATS_LOCK_ESCALATIONS;
                pertableStatsItems_[10].statsItem_id = SQLSTATS_LOCK_WAITS;
                pertableStatsItems_[11].statsItem_id = SQLSTATS_DP2_CPU_BUSY_TIME;
                pertableStatsItems_[12].statsItem_id = SQLSTATS_OPENS;
                pertableStatsItems_[13].statsItem_id = SQLSTATS_OPEN_TIME;
                if (pertableStatsItems_[0].str_value == NULL)
                {
                    tblName_ = pertableStatsItems_[0].str_value = new char[MAX_SQL_IDENTIFIER_LEN + 1];
                    if (tblName_ == NULL) { cliRC = 999; throw("error"); }
                    pertableStatsItems_[0].str_max_len = MAX_SQL_IDENTIFIER_LEN + 1;
                }
                bzero(pertableStatsItems_[0].str_value, MAX_SQL_IDENTIFIER_LEN + 1);
                // MAX_PERTABLE_ENTRY is set to 12
                cliRC = SQL_EXEC_GetStatisticsItems(reqType, reqStr, reqStrLen,
                                                    MAX_PERTABLE_ENTRY, pertableStatsItems_);
                if (cliRC != 0) throw("error");
                //
                for (i = 0; i < MAX_PERTABLE_ENTRY; i++)
                {
                    if (pertableStatsItems_[i].error_code != 0)
                        continue;
                    switch (pertableStatsItems_[i].statsItem_id)
                    {
                    case SQLSTATS_TABLE_ANSI_NAME:
                        memcpy(perTableStats[currPerTblEntry_].tblName,
                               pertableStatsItems_[i].str_value,
                               pertableStatsItems_[i].str_ret_len);
                        perTableStats[currPerTblEntry_].tblName[pertableStatsItems_[i].str_ret_len] = '\x0';
                        break;
                    case SQLSTATS_EST_ROWS_ACCESSED:
                        perTableStats[currPerTblEntry_].estAccessedRows = pertableStatsItems_[i].double_value;
                        break;
                    case SQLSTATS_EST_ROWS_USED:
                        perTableStats[currPerTblEntry_].estUsedRows = pertableStatsItems_[i].double_value;
                        break;
                    case SQLSTATS_ACT_ROWS_ACCESSED:
                        perTableStats[currPerTblEntry_].accessedRows = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_ACT_ROWS_USED:
                        perTableStats[currPerTblEntry_].usedRows = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_DISK_IOS:
                        perTableStats[currPerTblEntry_].diskIOs = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_MSG_COUNT:
                        perTableStats[currPerTblEntry_].numMessages = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_MSG_BYTES:
                        perTableStats[currPerTblEntry_].messagesBytes = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_LOCK_ESCALATIONS:
                        perTableStats[currPerTblEntry_].escalations = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_LOCK_WAITS:
                        perTableStats[currPerTblEntry_].lockWaits = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_DP2_CPU_BUSY_TIME:
                        perTableStats[currPerTblEntry_].dp2BusyTime = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_OPENS:
                        perTableStats[currPerTblEntry_].opens = pertableStatsItems_[i].int64_value;
                        break;
                    case SQLSTATS_OPEN_TIME:
                        perTableStats[currPerTblEntry_].openTime = pertableStatsItems_[i].int64_value;
                        break;
                    default:
                        break;
                    }
                }
                currPerTblEntry_++;
                break;
            default:
                break;
            }
            currStatsDescEntry_++;
        }
        perTableRowSize = currPerTblEntry_;

        Int32 len;

        if (masterStatsItems_ != NULL)
            for (i = 0; i < MAX_MASTERSTATS_ENTRY; i++)
            {
                if (masterStatsItems_[i].error_code != 0)
                    continue;
                switch (masterStatsItems_[i].statsItem_id)
                {
                case SQLSTATS_QUERY_ID:            //char*
                    masterStatsItems_[i].str_value[masterStatsItems_[i].str_ret_len] = '\0';
                    break;
                case SQLSTATS_COMP_START_TIME:     //int64
                    compStartTime = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_COMP_END_TIME:       //int64
                    compEndTime = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_COMP_TIME:           //int64
                    compTime = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_EXECUTE_START_TIME:  //int64
                    exeStartTime = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_FIRST_ROW_RET_TIME:  //int64
                    firstRowReturnTime = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_EXECUTE_END_TIME:    //int64
                    exeEndTime = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_EXECUTE_TIME:        //int64
                    exeElapsedTime = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_STMT_STATE:          //int64
                    state = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_ROWS_RETURNED:       //int64
                    rowsReturned = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SQL_ERROR_CODE:      //int64
                    sqlErrorCode = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_STATS_ERROR_CODE:    //int64
                    statsErrorCode = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_QUERY_TYPE:          //int64
                    break;
                case SQLSTATS_EST_ROWS_ACCESSED:   //double
                    estRowsAccessed = masterStatsItems_[i].double_value;
                    break;
                case SQLSTATS_EST_ROWS_USED:       //double
                    estRowsUsed = masterStatsItems_[i].double_value;
                    break;
                case SQLSTATS_FIXUP_TIME:
                    break;
                case SQLSTATS_PARENT_QUERY_ID:
                    strncpy(parentQID, masterStatsItems_[i].str_value, masterStatsItems_[i].str_ret_len);
                    parentQID[masterStatsItems_[i].str_ret_len] = '\0';
                    break;
                case SQLSTATS_NUM_SQLPROCS:
                    numSqlProcs = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_NUM_CPUS:
                    numCpus = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SOURCE_STR:
                    strncpy(sqlSrc, masterStatsItems_[i].str_value, masterStatsItems_[i].str_ret_len);
                    sqlSrc[masterStatsItems_[i].str_ret_len] = '\0';
                    break;
                case SQLSTATS_SOURCE_STR_LEN:
                    sqlSrcLen = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_MASTER_PRIORITY:
                    exePriority = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_TRANSID:
                    transIDnum = masterStatsItems_[i].int64_value;
                    if (transIDnum > 0)
                        _i64toa(transIDnum, transID, 10);
                    else
                    {
                        memset(transID, '\0', MAX_TXN_STR_LEN + 1);
                        strcpy(transID, "<N/A>");
                    }
                    // len = TransIdToText(masterStatsItems_[i].int64_value, transID, (short)(sizeof(transID)));
                    // if (len > 0)
                    //     transID[len] = '\0';
                    break;
                case SQLSTATS_AQR_LAST_ERROR:
                    AQRlastError = (short)masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_AQR_NUM_RETRIES:
                    AQRnumRetries = (short)masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_AQR_DELAY_BEFORE_RETRY:
                    AQRdelayBeforeRetry = (short)masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_ROWS_AFFECTED:
                    NumRowsIUD = masterStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_CHILD_QUERY_ID:
                    strncpy(childQID, masterStatsItems_[i].str_value, masterStatsItems_[i].str_ret_len);
                    childQID[masterStatsItems_[i].str_ret_len] = '\0';
                    break;
                case SQLSTATS_RECLAIM_SPACE_COUNT:
                    break;
                case SQLSTATS_SUBQUERY_TYPE:
                    char tmpSubQry[2];
                    memset(tmpSubQry, '\0', 2);
                    tmpSubQry[0] = masterStatsItems_[i].str_value[0];
                    if (strncmp(tmpSubQry, "2", 1) == 0)
                        strcpy(subQryType, "SQL_STMT_REPLICATE");
                    else if (strncmp(tmpSubQry, "1", 1) == 0)
                        strcpy(subQryType, "SQL_STMT_CTAS");
                    else
                        strcpy(subQryType, "SQL_STMT_NA");
                    break;
                case SQLSTATS_PARENT_QUERY_SYSTEM:
                    strncpy(parentSysName, masterStatsItems_[i].str_value, masterStatsItems_[i].str_ret_len);
                    parentSysName[masterStatsItems_[i].str_ret_len] = '\0';
                    break;
                default:
                    break;
                }
            }

        if (measStatsItems_ != NULL)
        {
            for (i = 0; i < MAX_MEASSTATS_ENTRY; i++)
            {
                if (measStatsItems_[i].error_code != 0)
                    continue;
                switch (measStatsItems_[i].statsItem_id)
                {
                case SQLSTATS_ACT_ROWS_ACCESSED:   //int64
                    AccessedRows = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_ACT_ROWS_USED:       //int64
                    UsedRows = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_MSG_COUNT:           //int64
                    NumMessages = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_MSG_BYTES:           //int64
                    MessagesBytes = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_STATS_BYTES:         //int64
                    StatsBytes = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_DISK_IOS:            //int64
                    DiskIOs = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_LOCK_WAITS:          //int64
                    LockWaits = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_LOCK_ESCALATIONS:    //int64
                    Escalations = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_DP2_CPU_BUSY_TIME:   //int64
                    DiskProcessBusyTime = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SQL_CPU_BUSY_TIME:   //int64
                    ProcessBusyTime = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SQL_SPACE_ALLOC:     //int64
                    SpaceTotal = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SQL_SPACE_USED:      //int64
                    SpaceUsed = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SQL_HEAP_ALLOC:      //int64
                    HeapTotal = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SQL_HEAP_USED:       //int64
                    HeapUsed = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_EID_SPACE_ALLOC:     //int64
                    Dp2SpaceTotal = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_EID_SPACE_USED:      //int64
                    Dp2SpaceUsed = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_EID_HEAP_ALLOC:      //int64
                    Dp2HeapTotal = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_EID_HEAP_USED:       //int64
                    Dp2HeapUsed = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_OPENS:               //int64
                    Opens = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_OPEN_TIME:           //int64
                    OpenTime = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_PROCESS_CREATED:     //int64
                    NewProcess = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_PROCESS_CREATE_TIME: //int64
                    NewProcessTime = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_REQ_MSG_CNT:         //int64
                    reqMsgCnt = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_REQ_MSG_BYTES:       //int64
                    reqMsgBytes = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_REPLY_MSG_CNT:       //int64
                    replyMsgCnt = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_REPLY_MSG_BYTES:     //int64
                    replyMsgBytes = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SCRATCH_FILE_COUNT:
                    ScratchFileCount = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SCRATCH_BUFFER_BLOCK_SIZE:
                    ScratchBufferBlockSize = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SCRATCH_BUFFER_BLOCKS_READ:
                    ScratchBufferBlocksRead = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SCRATCH_BUFFER_BLOCKS_WRITTEN:
                    ScratchBufferBlocksWritten = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SCRATCH_OVERFLOW_MODE:
                    ScratchOverflowMode = (short)measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SCRATCH_READ_COUNT:
                    ScratchBufferReadCount = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_SCRATCH_WRITE_COUNT:
                    ScratchBufferWriteCount = measStatsItems_[i].int64_value;
                    break;
                case SQLSTATS_UDR_CPU_BUSY_TIME:
                    UdrCpuTime = measStatsItems_[i].int64_value;
                    break;
                default:
                    break;
                }
            }

            if (pSrvrStmt != NULL)
            {
                pSrvrStmt->m_execOverflow.m_OvfFileCount = ScratchFileCount;
                pSrvrStmt->m_execOverflow.m_OvfSpaceAllocated = (ScratchFileCount * 2 * ONE_GB) / ONE_KB;
                pSrvrStmt->m_execOverflow.m_OvfSpaceUsed = ScratchBufferBlocksWritten * ScratchBufferBlockSize;
                pSrvrStmt->m_execOverflow.m_OvfBlockSize = ScratchBufferBlockSize;
                pSrvrStmt->m_execOverflow.m_OvfIOs = ScratchBufferReadCount + ScratchBufferWriteCount;
                pSrvrStmt->m_execOverflow.m_OvfMessageBuffersTo = ScratchBufferBlocksWritten;
                pSrvrStmt->m_execOverflow.m_OvfMessageTo = ScratchBufferWriteCount;
                pSrvrStmt->m_execOverflow.m_OvfMessageBytesTo = ScratchBufferBlocksWritten * ScratchBufferBlockSize;
                pSrvrStmt->m_execOverflow.m_OvfMessageBuffersFrom = ScratchBufferBlocksRead;
                pSrvrStmt->m_execOverflow.m_OvfMessageFrom = ScratchBufferReadCount;
                pSrvrStmt->m_execOverflow.m_OvfMessageBytesFrom = ScratchBufferBlocksRead * ScratchBufferBlockSize;
            }
            TotalMemAlloc = SpaceUsed + HeapUsed;
            MaxMemUsed = SpaceTotal + HeapTotal + Dp2SpaceTotal + Dp2HeapTotal;
        }
    }
    //LCOV_EXCL_START
    catch (...)
{ statsErrorCode = cliRC; if (pSrvrStmt != NULL) pSrvrStmt->m_mxsrvr_substate = NDCS_RMS_ERROR; SQL_EXEC_ClearDiagnostics(NULL); // Fix for query disappearing when the below CQD is issued // 'CQD detailed_statistics 'ALL' CR?? init_rms_counters(); } //LCOV_EXCL_STOP srvrGlobal->allocatedResources += NewProcess; if (pSrvrStmt != NULL) if (rmsSqlSource_ != NULL && pSrvrStmt->m_bNewQueryId == true ) { bzero (pSrvrStmt->m_shortQueryText, sizeof(pSrvrStmt->m_shortQueryText)); pSrvrStmt->m_rmsSqlSourceLen = sqlSrcLen + 1; translateToUTF8(srvrGlobal->isoMapping, rmsSqlSource_, sqlSrcLen + 1, pSrvrStmt->m_shortQueryText, RMS_STORE_SQL_SOURCE_LEN); pSrvrStmt->m_bNewQueryId = false; } if (sqlStatsDesc_ != NULL) delete[] sqlStatsDesc_; if (masterStatsItems_ != NULL) delete[] masterStatsItems_; if (measStatsItems_ != NULL) delete[] measStatsItems_; if (qrid_ != NULL) delete[] qrid_; if (parentid_ != NULL) delete[] parentid_; if (childid_ != NULL) delete[] childid_; if (rmsSqlSource_ != NULL) delete[] rmsSqlSource_; if (pertableStatsItems_ != NULL) delete[] pertableStatsItems_; if (tblName_ != NULL) delete[] tblName_; if (subQueryType_ != NULL) delete[] subQueryType_; if (parentSystem_ != NULL) delete[] parentSystem_; } // Single Row Per Query Initiative - // // generate the delayed 21036 message to repository // void ResStatisticsStatement::endRepository(SRVR_STMT_HDL *pSrvrStmt, Int32 sqlWarningOrErrorLength, BYTE *sqlWarningOrError, bool bClose_Fetch) { SQL_COMPILATION_STATS_DATA comp_stats_data; /* if (*flag_21036 == false) // do not generate message return; */ if (pSrvrStmt == NULL || pSrvrStmt->sqlUniqueQueryID == NULL || pSrvrStmt->sqlUniqueQueryID[0] == '\0' ) return; if (pSrvrStmt->m_need_21036_end_msg == false) // do not generate message return; if (pSrvrStmt->bLowCost == true) // Low Cost - do not generate message. Should not reach here if above is false. 
return; // // AGGREGATION----------------endRepository EXECUTE // pSrvrStmt->m_lastQueryEndTime = statementEndTime; pSrvrStmt->m_lastQueryEndCpuTime = statementEndCpuTime; comp_stats_data = pSrvrStmt->comp_stats_info.compilationStats; if (comp_stats_data.compilerId[0] == 0) strcpy(comp_stats_data.compilerId,"<N/A>"); if (srvrGlobal->m_statisticsPubType == STATISTICS_AGGREGATED) { if (resStatSession != NULL) resStatSession->accumulateStatistics(this); } if (pSrvrStmt->m_bqueryFinish == false) // do not generate message return; Int32 inState = pSrvrStmt->inState, inSqlQueryType = pSrvrStmt->sqlQueryType; char *sqlString = pSrvrStmt->sqlString; bool isClosed = pSrvrStmt->isClosed; bool newStmt = false; SQL_QUERY_COST_INFO cost_info = pSrvrStmt->cost_info; unsigned short queryState = pSrvrStmt->m_state; UInt32 maxMemUsed = pSrvrStmt->m_maxMemUsed; unsigned short warnLevel = pSrvrStmt->m_warnLevel; int64 execTime = pSrvrStmt->m_exec_time, holdTime = pSrvrStmt->m_hold_time + pSrvrStmt->m_suspended_time, suspendTime = pSrvrStmt->m_suspended_time, waitTime = pSrvrStmt->m_wait_time, WMSstartTS = pSrvrStmt->m_WMSstart_ts; memcpy(con_rule_name, pSrvrStmt->m_con_rule_name, sizeof(con_rule_name)); memcpy(cmp_rule_name, pSrvrStmt->m_cmp_rule_name, sizeof(cmp_rule_name)); memcpy(exe_rule_name, pSrvrStmt->m_exe_rule_name, sizeof(exe_rule_name)); int64 inexeEndTime = exeEndTime; // SQLSTATS_EXECUTE_END_TIME int64 inqueryElapseTime = 0; int64 inqueryExecutionTime = 0; char *pBuffer = msgBuffer; if (memcmp(queryId, pSrvrStmt->sqlUniqueQueryID, pSrvrStmt->sqlUniqueQueryIDLen) != 0) newStmt = true; if (pSrvrStmt->queryStartTime == 0) inqueryElapseTime = pSrvrStmt->queryEndTime - statementStartTime; else inqueryElapseTime = pSrvrStmt->queryEndTime - pSrvrStmt->queryStartTime; if (pSrvrStmt->queryStartCpuTime == 0) inqueryExecutionTime = pSrvrStmt->queryEndCpuTime - statementStartCpuTime; else inqueryExecutionTime = pSrvrStmt->queryEndCpuTime - pSrvrStmt->queryStartCpuTime; if (inqueryElapseTime <= 0) { statementEndTime = JULIANTIMESTAMP(); statementEndCpuTime = getCpuTime(); inqueryElapseTime = statementEndTime - pSrvrStmt->queryStartTime; inqueryExecutionTime = statementEndCpuTime - pSrvrStmt->queryStartCpuTime; } if (inexeEndTime <= 0) inexeEndTime = statementEndTime; if (pSrvrStmt->m_maxMemUsed > MaxMemUsed) MaxMemUsed = pSrvrStmt->m_maxMemUsed; if (inState == STMTSTAT_EXECUTE) { // log the end message if ((stmtType != TYPE_SELECT && isClosed == TRUE) || (inSqlQueryType == SQL_SELECT_UNIQUE) || (errorCode != SQL_SUCCESS && errorCode != SQL_SUCCESS_WITH_INFO)) { if (stmtType == TYPE_INSERT || stmtType == TYPE_INSERT_PARAM || stmtType == TYPE_DELETE || stmtType == TYPE_UPDATE || inSqlQueryType == SQL_SELECT_UNIQUE) { string querySubstate; if (queryState == QUERY_INIT || queryState == QUERY_COMPLETED) { queryState = QUERY_COMPLETED; querySubstate = pSrvrStmt->m_mxsrvr_substate == NDCS_INIT? 
getQuerySubStateStringRes(pSrvrStmt->m_state) : getSrvrSubstate(pSrvrStmt->m_mxsrvr_substate); } else querySubstate = getQuerySubStateStringRes(queryState); stringstream ss; ss << "File: " << __FILE__ << ", Function: " << __FUNCTION__ << ", Line: " << __LINE__ << ", QID: " << pSrvrStmt->sqlUniqueQueryID; EXEC_OVERFLOW execOverflow = { 0 }; memcpy(&execOverflow, &pSrvrStmt->m_execOverflow, sizeof(EXEC_OVERFLOW)); // Send query end message if (srvrGlobal->m_bStatisticsEnabled && srvrGlobal->m_statisticsPubType==STATISTICS_AGGREGATED && (srvrGlobal->m_iQueryPubThreshold==0 || pubStarted)) SendQueryStats(false, pSrvrStmt, (char *)sqlWarningOrError, sqlWarningOrErrorLength); } else { // non unique select error condition string querySubstate; if (queryState == QUERY_INIT || queryState == QUERY_COMPLETED) { queryState = QUERY_COMPLETED; querySubstate = pSrvrStmt->m_mxsrvr_substate == NDCS_INIT? getQuerySubStateStringRes(pSrvrStmt->m_state) : getSrvrSubstate(pSrvrStmt->m_mxsrvr_substate); } else querySubstate = getQuerySubStateStringRes(queryState); stringstream ss; ss << "File: " << __FILE__ << ", Function: " << __FUNCTION__ << ", Line: " << __LINE__ << ", QID: " << pSrvrStmt->sqlUniqueQueryID; EXEC_OVERFLOW execOverflow = { 0 }; memcpy(&execOverflow, &pSrvrStmt->m_execOverflow, sizeof(EXEC_OVERFLOW)); // Send query end message if (srvrGlobal->m_bStatisticsEnabled && srvrGlobal->m_statisticsPubType==STATISTICS_AGGREGATED && (srvrGlobal->m_iQueryPubThreshold==0 || pubStarted)) SendQueryStats(false, pSrvrStmt, (char *)sqlWarningOrError, sqlWarningOrErrorLength); } /* *flag_21036 = false; */ pSrvrStmt->m_need_21036_end_msg = false; } } if ((inState == STMTSTAT_FETCH) || inState == STMTSTAT_CLOSE) { string querySubstate; if (queryState == QUERY_INIT || queryState == QUERY_COMPLETED) { queryState = QUERY_COMPLETED; querySubstate = pSrvrStmt->m_mxsrvr_substate == NDCS_INIT? getQuerySubStateStringRes(pSrvrStmt->m_state) : getSrvrSubstate(pSrvrStmt->m_mxsrvr_substate); } else { querySubstate = getQuerySubStateStringRes(queryState); } stringstream ss; ss << "File: " << __FILE__ << ", Function: " << __FUNCTION__ << ", Line: " << __LINE__ << ", QID: " << pSrvrStmt->sqlUniqueQueryID; EXEC_OVERFLOW execOverflow = { 0 }; memcpy(&execOverflow, &pSrvrStmt->m_execOverflow, sizeof(EXEC_OVERFLOW)); // Send query end message if (srvrGlobal->m_bStatisticsEnabled && srvrGlobal->m_statisticsPubType==STATISTICS_AGGREGATED && (srvrGlobal->m_iQueryPubThreshold==0 || pubStarted)) SendQueryStats(false, pSrvrStmt, (char *)sqlWarningOrError, sqlWarningOrErrorLength); /* *flag_21036 = false; */ pSrvrStmt->m_need_21036_end_msg = false; } } // // Single Row Per Query initiative - // // WMS returned error on DO_WouldLikeToExecute (after Prepare), we still need to generate
// // this function takes care of a) start message b) get the stats from RMS // void ResStatisticsStatement::toRepository(SRVR_STMT_HDL *pSrvrStmt, Int32 sqlWarningOrErrorLength, BYTE *sqlWarningOrError) { Int32 inState = pSrvrStmt->inState, inSqlNewQueryType = pSrvrStmt->sqlNewQueryType; char *inSqlStatement = pSrvrStmt->sqlString; SQL_QUERY_COST_INFO cost_info = pSrvrStmt->cost_info; SQL_QUERY_COMPILER_STATS_INFO comp_stats_info = pSrvrStmt->comp_stats_info; SQL_COMPILATION_STATS_DATA comp_stats_data = comp_stats_info.compilationStats; unsigned short queryState = pSrvrStmt->m_state; unsigned short warnLevel = pSrvrStmt->m_warnLevel; int64 holdTime = pSrvrStmt->m_hold_time, waitTime = pSrvrStmt->m_wait_time, WMSstartTS = pSrvrStmt->m_WMSstart_ts; memcpy(con_rule_name, pSrvrStmt->m_con_rule_name, sizeof(con_rule_name)); memcpy(cmp_rule_name, pSrvrStmt->m_cmp_rule_name, sizeof(cmp_rule_name)); memcpy(exe_rule_name, pSrvrStmt->m_exe_rule_name, sizeof(exe_rule_name)); char *pBuffer = msgBuffer; // // AGGREGATION----------------toRepository // if (comp_stats_info.dop > 1) estTotalMem = cost_info.estimatedTotalMem * comp_stats_info.dop; else estTotalMem = cost_info.estimatedTotalMem; if (pSrvrStmt->queryStartTime > 0) { queryStartTime = pSrvrStmt->queryStartTime; queryStartCpuTime = pSrvrStmt->queryStartCpuTime; } strcpy(queryId, pSrvrStmt->sqlUniqueQueryID); if (comp_stats_data.compilerId[0] == 0) strcpy(comp_stats_data.compilerId,"<N/A>"); //if (comp_stats_data.compileInfoLen <= 0 || comp_stats_data.compileInfo[0] == 0) // strcpy(comp_stats_data.compileInfo,"<N/A>"); if (inSqlStatement != NULL && pSrvrStmt->bLowCost == false) { // log the start message 21036 stringstream ss; ss << "File: " << __FILE__ << ", Function: " << __FUNCTION__ << ", Line: " << __LINE__ << ", QID: " << queryId; /* SetQueryStatsInfoSqlText( (const char *)ss.str().c_str() //"ResStatisticsStatement::toRepository()" , (const char *)queryId , queryStartTime , (const char *)inSqlStatement ); */ pSrvrStmt->m_need_21036_end_msg = true; // now get stats from RMS // setStatistics(); } } void ResStatisticsStatement::prepareQuery(struct collect_info *setinit) { if (setinit != NULL) { strcpy(resCollectinfo.clientId,setinit->clientId); strcpy(resCollectinfo.userName,setinit->userName); strcpy(resCollectinfo.applicationId,setinit->applicationId); strcpy(resCollectinfo.nodeName,setinit->nodeName); strcpy(resCollectinfo.cpuPin,setinit->cpuPin); if (resCollectinfo.startPriority == 0) resCollectinfo.startPriority = setinit->startPriority; if (resCollectinfo.currentPriority == 0) resCollectinfo.currentPriority = setinit->startPriority; strcpy(resCollectinfo.DSName,setinit->DSName); resCollectinfo.userId = setinit->userId; } //char stmtLabel[MAX_STMT_LABEL_LEN+1]; strcpy(stmtLabel,"STMT_INTERNAL_STATISTICS"); resSrvrStmt = getSrvrStmt(stmtLabel,TRUE); if (resSrvrStmt == NULL) { sprintf(tmpString, "%s", "Unable to allocate statement to Statistics."); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_INTERNAL_STATISTICS", tmpString); } else { // Fix for PERTABLE stats CQD. The second parameter to the stored procedure is being // changed to 'MERGE=2,QID=CURRENT' so that we get back ACCUMULATED stats even if // the detailed_statistics CQD is set to PERTABLE. // Also don't need the below CQD since we'll be defaulting to PERTABLE stats // in InitializeDialogue.
/* strcpy(sqlString, "CONTROL QUERY DEFAULT detailed_statistics 'ACCUMULATED'"); retcode = resSrvrStmt->ExecDirect(NULL, sqlString, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); switch (retcode) { case SQL_ERROR: strcpy(tmpString, "Error in Executing Control Query for Statistics. Statement Statistics Disabled. "); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); break; default: break; } */ // strcpy(sqlString, "select variable_info from table(statistics(null,null))"); strcpy(sqlString, "select variable_info from table(statistics(null,_ISO88591'MERGE=2,QID=CURRENT')) for read uncommitted access"); retcode = resSrvrStmt->Prepare(sqlString,INTERNAL_STMT,SQL_ASYNC_ENABLE_OFF, 0); switch (retcode) { case SQL_ERROR: strcpy(tmpString, "Error in Preparing Query for Statistics Procedure. Statement Statistics Disabled"); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); break; default: break; } } } inline string ResStatisticsStatement::getErrorText(char *inSqlError, size_t inSqlErrorLength, size_t inMaxSqlErrorLength) { string s1; if (inSqlErrorLength > 0 && inSqlError != NULL) { err_desc_def *error = (err_desc_def*)inSqlError; s1.assign(inSqlError + 16, _min(inMaxSqlErrorLength, error->length-1)); } else s1.assign("<N/A>"); return s1; } void ResStatisticsStatement::appendErrorText(char *inSqlError, Int32 inSqlErrorLength) { int length = 0; int strSize = 0; err_desc_def *error; error = (err_desc_def*)inSqlError; strcat(msgInfo, " ErrorText:"); if ((inSqlErrorLength > 0) && (inSqlError != NULL)) { // Have at least 24 characters of error text - Initial string (till end of error number) takes 16 chars // For example: // 12345678901234567890...... // *** ERROR[nnnn] error text .. 
[date and time] length = strlen(msgInfo); strSize = BUFFERSIZE - (length + 40); } if (strSize > 0) //strncat(msgInfo, inSqlError, min(strSize,inSqlErrorLength)); strncat(msgInfo, inSqlError+16, _min(strSize,error->length)); else strcat(msgInfo, "<N/A>"); } int64 ResStatisticsStatement::getCpuTime() { short error; int64 cpuTime = 0; char errorString[32]; struct rusage my_usage; if ((error = getrusage(RUSAGE_SELF, &my_usage)) != 0 ) { sprintf(errorString, "%d", errno); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errorString); return cpuTime; } cpuTime = my_usage.ru_utime.tv_sec * 1000000 + my_usage.ru_stime.tv_sec * 1000000 + my_usage.ru_stime.tv_usec + my_usage.ru_utime.tv_usec; return cpuTime; } short ResStatisticsStatement::currentPriority() { short error; short curPriority = 0; char errorString[32]; return curPriority; } void ResStatisticsStatement::setStatisticsFlag(bool setStatisticsFlag) { statStatisticsFlag = setStatisticsFlag; } char *typeOfStatementList[] = { "SQL_OTHER", "SQL_UNKNOWN", "SQL_SELECT_UNIQUE", "SQL_SELECT_NON_UNIQUE", "SQL_INSERT_UNIQUE", "SQL_INSERT_NON_UNIQUE", "SQL_UPDATE_UNIQUE", "SQL_UPDATE_NON_UNIQUE", "SQL_DELETE_UNIQUE", "SQL_DELETE_NON_UNIQUE", "SQL_CONTROL", "SQL_SET_TRANSACTION", "SQL_SET_CATALOG", "SQL_SET_SCHEMA", "SQL_CALL_NO_RESULT_SETS", "SQL_CALL_WITH_RESULT_SETS", "SQL_SP_RESULT_SET", "SQL_INSERT_RWRS", "SQL_CAT_UTIL", "SQL_EXE_UTIL", "SQL_SELECT_UNLOAD", "SQL_NOT_SUPPORTED" }; char* ResStatisticsStatement::getStatementType(Int32 inSqlQType) { if(inSqlQType>=SQL_OTHER && inSqlQType<=SQL_SELECT_UNLOAD) return typeOfStatementList[inSqlQType+1]; else return typeOfStatementList[21]; } // // AGGREGATION----------------procedures // const char* ResStatisticsStatement::mapEmptyToNA(char* input) { if (input[0] != 0 && input[0] != ' ') return input; return "<N/A>"; } void ResStatisticsStatement::SendQueryStats(bool bStart, SRVR_STMT_HDL *pSrvrStmt, char *inSqlError, Int32 inSqlErrorLength) { SQL_COMPILATION_STATS_DATA comp_stats_data; comp_stats_data = pSrvrStmt->comp_stats_info.compilationStats; std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> pQuery_info = std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION>(new STATEMENT_QUERYEXECUTION); *pQuery_info = {0}; long long endtime = 0; if (!bStart) endtime = JULIANTIMESTAMP(); pQuery_info->m_process_id = srvrGlobal->process_id; pQuery_info->m_thread_id = srvrGlobal->receiveThrId; pQuery_info->m_node_id = srvrGlobal->m_NodeId; pQuery_info->m_ip_address_id = srvrGlobal->IpAddress; pQuery_info->m_process_name = srvrGlobal->m_ProcName; pQuery_info->m_exec_start_utc_ts = queryStartTime; pQuery_info->m_query_id = queryId; pQuery_info->m_user_name = srvrGlobal->userSID; pQuery_info->m_role_name = srvrGlobal->QSRoleName; pQuery_info->m_start_priority = resCollectinfo.startPriority; pQuery_info->m_session_id = srvrGlobal->sessionId; pQuery_info->m_client_name = srvrGlobal->ClientComputerName; pQuery_info->m_application_name = srvrGlobal->ApplicationName; UpdateStringText(pQuery_info->m_application_name); pQuery_info->m_statement_id = statementId; pQuery_info->m_statement_type = getStatementType(pSrvrStmt->sqlQueryType); //SUBMIT_UTC_TS=EXEC_START_UTC_TS for now //Will fix it once wms is brought in pQuery_info->m_submit_utc_ts = pQuery_info->m_exec_start_utc_ts; if (!pubStarted) { pQuery_info->m_compile_start_utc_ts = comp_stats_data.compileStartTime; pQuery_info->m_compile_end_utc_ts = comp_stats_data.compileEndTime; pQuery_info->m_compile_elapsed_time =
comp_stats_data.compileEndTime - comp_stats_data.compileStartTime; pQuery_info->m_cmp_affinity_num = pSrvrStmt->comp_stats_info.affinityNumber; pQuery_info->m_cmp_dop = pSrvrStmt->comp_stats_info.dop; pQuery_info->m_cmp_txn_needed = pSrvrStmt->comp_stats_info.xnNeeded; pQuery_info->m_cmp_mandatory_x_prod = pSrvrStmt->comp_stats_info.mandatoryCrossProduct; pQuery_info->m_cmp_missing_stats = pSrvrStmt->comp_stats_info.missingStats; pQuery_info->m_cmp_num_joins = pSrvrStmt->comp_stats_info.numOfJoins; pQuery_info->m_cmp_full_scan_on_table = pSrvrStmt->comp_stats_info.fullScanOnTable; pQuery_info->m_cmp_rows_accessed_full_scan = max(double(0),pSrvrStmt->comp_stats_info.rowsAccessedForFullScan); pQuery_info->m_est_accessed_rows = estRowsAccessed; pQuery_info->m_est_used_rows = estRowsUsed; pQuery_info->m_cmp_compiler_id = comp_stats_data.compilerId; pQuery_info->m_cmp_cpu_path_length = comp_stats_data.cmpCpuTotal; pQuery_info->m_cmp_cpu_binder = comp_stats_data.cmpCpuBinder; pQuery_info->m_cmp_cpu_normalizer = comp_stats_data.cmpCpuNormalizer; pQuery_info->m_cmp_cpu_analyzer = comp_stats_data.cmpCpuAnalyzer; pQuery_info->m_cmp_cpu_optimizer = comp_stats_data.cmpCpuOptimizer; pQuery_info->m_cmp_cpu_generator = comp_stats_data.cmpCpuGenerator; pQuery_info->m_cmp_metadata_cache_hits = comp_stats_data.metadataCacheHits; pQuery_info->m_cmp_metadata_cache_lookups = comp_stats_data.metadataCacheLookups; pQuery_info->m_cmp_query_cache_status = comp_stats_data.queryCacheState; pQuery_info->m_cmp_histogram_cache_hits = comp_stats_data.histogramCacheHits; pQuery_info->m_cmp_histogram_cache_lookups = comp_stats_data.histogramCacheLookups; pQuery_info->m_cmp_stmt_heap_size = comp_stats_data.stmtHeapSize; pQuery_info->m_cmp_context_heap_size = comp_stats_data.cxtHeapSize; pQuery_info->m_cmp_optimization_tasks = comp_stats_data.optTasks; pQuery_info->m_cmp_optimization_contexts = comp_stats_data.optContexts; pQuery_info->m_cmp_is_recompile = comp_stats_data.isRecompile; pQuery_info->m_est_num_seq_ios = pSrvrStmt->cost_info.numSeqIOs; pQuery_info->m_est_num_rand_ios = pSrvrStmt->cost_info.numRandIOs; pQuery_info->m_est_cost = estimatedCost; pQuery_info->m_est_cardinality = pSrvrStmt->cost_info.cardinality; pQuery_info->m_est_io_time = pSrvrStmt->cost_info.ioTime; pQuery_info->m_est_msg_time = pSrvrStmt->cost_info.msgTime; pQuery_info->m_est_idle_time = pSrvrStmt->cost_info.idleTime; pQuery_info->m_est_cpu_time = pSrvrStmt->cost_info.cpuTime; pQuery_info->m_est_total_time = pSrvrStmt->cost_info.totalTime; pQuery_info->m_est_total_mem = pSrvrStmt->cost_info.estimatedTotalMem; pQuery_info->m_est_resource_usage = pSrvrStmt->cost_info.resourceUsage; //pQuery_info->m_aggregation_option = pQuery_info->m_cmp_number_of_bmos = pSrvrStmt->comp_stats_info.numOfBmos; char overflowmode_str[64]; memset(overflowmode_str,0,sizeof(overflowmode_str)); sprintf(overflowmode_str,"%u",pSrvrStmt->comp_stats_info.overflowMode); pQuery_info->m_cmp_overflow_mode = string(overflowmode_str); pQuery_info->m_cmp_overflow_size = pSrvrStmt->comp_stats_info.overflowSize; } //pQuery_info->m_aggregate_total = pQuery_info->m_stats_error_code = statsErrorCode; pQuery_info->m_query_elapsed_time = JULIANTIMESTAMP() - queryStartTime; pQuery_info->m_sql_process_busy_time = ProcessBusyTime; pQuery_info->m_disk_process_busy_time = DiskProcessBusyTime; pQuery_info->m_disk_ios = DiskIOs; pQuery_info->m_num_sql_processes = numSqlProcs; pQuery_info->m_sql_space_allocated = SpaceTotal; pQuery_info->m_sql_space_used = SpaceUsed; 
pQuery_info->m_sql_heap_allocated = HeapTotal; pQuery_info->m_sql_heap_used = HeapUsed; pQuery_info->m_total_mem_alloc = TotalMemAlloc; pQuery_info->m_max_mem_used = MaxMemUsed; pQuery_info->m_transaction_id = transID; pQuery_info->m_num_request_msgs = reqMsgCnt; pQuery_info->m_num_request_msg_bytes = reqMsgBytes; pQuery_info->m_num_reply_msgs = replyMsgCnt; pQuery_info->m_num_reply_msg_bytes = replyMsgBytes; pQuery_info->m_first_result_return_utc_ts = firstRowReturnTime; pQuery_info->m_rows_returned_to_master = rowsReturned; pQuery_info->m_parent_query_id = parentQID; pQuery_info->m_parent_system_name = parentSysName; pQuery_info->m_exec_end_utc_ts = endtime; pQuery_info->m_master_execution_time = queryExecutionTime; pQuery_info->m_master_elapse_time = queryElapseTime; pQuery_info->m_query_status = getQueryStateStringRes(pSrvrStmt->m_state); pQuery_info->m_query_sub_status = getQuerySubStateStringRes(pSrvrStmt->m_state); pQuery_info->m_error_code = errorCode; pQuery_info->m_sql_error_code = sqlErrorCode; pQuery_info->m_error_text = getErrorText(inSqlError, inSqlErrorLength, MAX_ERROR_TEXT_LENGTH); UpdateStringText(pQuery_info->m_error_text); if(pSrvrStmt->sqlString!=NULL) { pQuery_info->m_query_text =pSrvrStmt->sqlString; UpdateStringText(pQuery_info->m_query_text); } if (pSrvrStmt->exPlan != SRVR_STMT_HDL::STORED && pSrvrStmt->sqlPlan != NULL && pSrvrStmt->sqlPlanLen > 0) { pQuery_info->m_explain_plan = new char[pSrvrStmt->sqlPlanLen]; if (pQuery_info->m_explain_plan != NULL) { memcpy( pQuery_info->m_explain_plan, pSrvrStmt->sqlPlan, pSrvrStmt->sqlPlanLen ); pQuery_info->m_explain_plan_len = pSrvrStmt->sqlPlanLen; pSrvrStmt->exPlan = SRVR_STMT_HDL::STORED; // Ignores for updates since plan does not change } else SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "SQL explain plan"); } pQuery_info->m_last_error_before_aqr = AQRlastError; pQuery_info->m_delay_time_before_aqr_sec = AQRdelayBeforeRetry; pQuery_info->m_total_num_aqr_retries = AQRnumRetries; pQuery_info->m_msg_bytes_to_disk = replyMsgBytes; pQuery_info->m_msgs_to_disk = replyMsgCnt; pQuery_info->m_rows_accessed = AccessedRows; pQuery_info->m_rows_retrieved = rowsReturned; pQuery_info->m_num_rows_iud = NumRowsIUD; pQuery_info->m_processes_created = NewProcess; pQuery_info->m_process_create_busy_time = NewProcessTime; pQuery_info->m_ovf_file_count = ScratchFileCount; pQuery_info->m_ovf_space_allocated = SpaceTotal; pQuery_info->m_ovf_space_used = SpaceUsed; pQuery_info->m_ovf_block_size = ScratchBufferBlockSize; pQuery_info->m_ovf_write_read_count = ScratchBufferReadCount + ScratchBufferWriteCount; pQuery_info->m_ovf_write_count = ScratchBufferWriteCount; pQuery_info->m_ovf_buffer_blocks_written = ScratchBufferBlocksWritten; //pQuery_info->m_ovf_buffer_bytes_written = ; pQuery_info->m_ovf_read_count = ScratchBufferReadCount; pQuery_info->m_ovf_buffer_blocks_read = ScratchBufferBlocksRead; //pQuery_info->m_ovf_buffer_bytes_read = ; //pQuery_info->m_num_nodes = ; pQuery_info->m_udr_process_busy_time = ProcessBusyTime; pQuery_info->m_pertable_stats = perTableRowSize; pQuery_info->m_last_updated_time = pQuery_info->m_last_updated_time; if (!pubStarted) sendQueryStats(PUB_TYPE_STATEMENT_NEW_QUERYEXECUTION, pQuery_info); else sendQueryStats(PUB_TYPE_STATEMENT_UPDATE_QUERYEXECUTION, pQuery_info); if (bStart) pubStarted = true; }
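A minimal standalone sketch of the scratch-overflow derivation in the code above, where m_OvfSpaceAllocated, m_OvfSpaceUsed and m_OvfIOs are computed from raw RMS scratch counters. The struct and function names below are illustrative, not from the NDCS sources; the 2 GB-per-scratch-file sizing is taken directly from the expression in the code:

#include <cstdint>
#include <iostream>

// Raw counters, as read from the SQLSTATS_SCRATCH_* items above.
struct ScratchCounters {
    int64_t fileCount;
    int64_t blockSize;      // bytes per buffer block
    int64_t blocksRead;
    int64_t blocksWritten;
    int64_t readCount;
    int64_t writeCount;
};

struct OverflowMetrics {
    int64_t spaceAllocatedKB;
    int64_t spaceUsedBytes;
    int64_t ios;
};

constexpr int64_t ONE_GB = 1024LL * 1024 * 1024;
constexpr int64_t ONE_KB = 1024;

OverflowMetrics deriveOverflow(const ScratchCounters &s) {
    OverflowMetrics m;
    m.spaceAllocatedKB = (s.fileCount * 2 * ONE_GB) / ONE_KB; // 2 GB per scratch file, reported in KB
    m.spaceUsedBytes   = s.blocksWritten * s.blockSize;       // bytes actually spilled to scratch
    m.ios              = s.readCount + s.writeCount;          // total overflow I/Os
    return m;
}

int main() {
    ScratchCounters s{1, 56 * ONE_KB, 100, 120, 100, 120};
    OverflowMetrics m = deriveOverflow(s);
    std::cout << m.spaceAllocatedKB << " KB allocated, " << m.spaceUsedBytes
              << " bytes used, " << m.ios << " overflow IOs\n";
}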
1
15,543
Should MAX_PERTABLE_ENTRY here be 10?
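For context on the question above: the code initializes pertableStatsItems_ up to index [13] (14 slots) while the inline comment says MAX_PERTABLE_ENTRY is 12, and the same constant bounds both the SQL_EXEC_GetStatisticsItems call and the read-back loop, so whatever value the constant has must cover the highest index written. A hypothetical sketch (plain C++, not the Trafodion API) of deriving the count from the item table so the two cannot drift apart:

#include <cstddef>

enum StatsItemId { TABLE_ANSI_NAME, EST_ROWS_ACCESSED, OPEN_TIME /* abbreviated */ };

struct StatsItem { StatsItemId id; };

// One row per stats item requested; extend the table and the count follows.
static StatsItem pertableItems[] = {
    {TABLE_ANSI_NAME}, {EST_ROWS_ACCESSED}, {OPEN_TIME},
};

constexpr std::size_t MAX_PERTABLE_ENTRY =
    sizeof(pertableItems) / sizeof(pertableItems[0]);

static_assert(MAX_PERTABLE_ENTRY == 3, "the count tracks the table automatically");

int main() { return 0; }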
apache-trafodion
cpp
@@ -29,6 +29,17 @@ namespace Microsoft.AspNet.Server.Kestrel.Http private static readonly ArraySegment<byte> _emptyData = new ArraySegment<byte>(new byte[0]); private static readonly byte[] _hex = Encoding.ASCII.GetBytes("0123456789abcdef"); + private static readonly byte[] _bytesConnectionClose = Encoding.ASCII.GetBytes("\r\nConnection: close"); + private static readonly byte[] _bytesConnectionKeepAlive = Encoding.ASCII.GetBytes("\r\nConnection: keep-alive"); + private static readonly byte[] _bytesTransferEncodingChunked = Encoding.ASCII.GetBytes("\r\nTransfer-Encoding: chunked"); + private static readonly byte[] _bytesHttpVersion1_0 = Encoding.ASCII.GetBytes("HTTP/1.0 "); + private static readonly byte[] _bytesHttpVersion1_1 = Encoding.ASCII.GetBytes("HTTP/1.1 "); + private static readonly byte[] _bytesContentLengthZero = Encoding.ASCII.GetBytes("\r\nContent-Length: 0"); + private static readonly byte[] _bytesSpace = Encoding.ASCII.GetBytes(" "); + private static readonly byte[] _bytesServer = Encoding.ASCII.GetBytes("\r\nServer: Kestrel"); + private static readonly byte[] _bytesDate = Encoding.ASCII.GetBytes("Date: "); + private static readonly byte[] _bytesEndHeaders = Encoding.ASCII.GetBytes("\r\n\r\n"); + private readonly object _onStartingSync = new Object(); private readonly object _onCompletedSync = new Object(); private readonly FrameRequestHeaders _requestHeaders = new FrameRequestHeaders();
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Net; using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.AspNet.Http; using Microsoft.AspNet.Http.Features; using Microsoft.AspNet.Server.Kestrel.Filter; using Microsoft.AspNet.Server.Kestrel.Infrastructure; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Primitives; // ReSharper disable AccessToModifiedClosure namespace Microsoft.AspNet.Server.Kestrel.Http { public partial class Frame : FrameContext, IFrameControl { private static readonly Encoding _ascii = Encoding.ASCII; private static readonly ArraySegment<byte> _endChunkBytes = CreateAsciiByteArraySegment("\r\n"); private static readonly ArraySegment<byte> _endChunkedResponseBytes = CreateAsciiByteArraySegment("0\r\n\r\n"); private static readonly ArraySegment<byte> _continueBytes = CreateAsciiByteArraySegment("HTTP/1.1 100 Continue\r\n\r\n"); private static readonly ArraySegment<byte> _emptyData = new ArraySegment<byte>(new byte[0]); private static readonly byte[] _hex = Encoding.ASCII.GetBytes("0123456789abcdef"); private readonly object _onStartingSync = new Object(); private readonly object _onCompletedSync = new Object(); private readonly FrameRequestHeaders _requestHeaders = new FrameRequestHeaders(); private readonly FrameResponseHeaders _responseHeaders = new FrameResponseHeaders(); private List<KeyValuePair<Func<object, Task>, object>> _onStarting; private List<KeyValuePair<Func<object, Task>, object>> _onCompleted; private bool _requestProcessingStarted; private Task _requestProcessingTask; private volatile bool _requestProcessingStopping; // volatile, see: https://msdn.microsoft.com/en-us/library/x13ttww7.aspx private volatile bool _requestAborted; private CancellationTokenSource _disconnectCts = new CancellationTokenSource(); private CancellationTokenSource _requestAbortCts; private FrameRequestStream _requestBody; private FrameResponseStream _responseBody; private bool _responseStarted; private bool _keepAlive; private bool _autoChunk; private Exception _applicationException; private readonly IPEndPoint _localEndPoint; private readonly IPEndPoint _remoteEndPoint; private readonly Action<IFeatureCollection> _prepareRequest; public Frame(ConnectionContext context) : this(context, remoteEndPoint: null, localEndPoint: null, prepareRequest: null) { } public Frame(ConnectionContext context, IPEndPoint remoteEndPoint, IPEndPoint localEndPoint, Action<IFeatureCollection> prepareRequest) : base(context) { _remoteEndPoint = remoteEndPoint; _localEndPoint = localEndPoint; _prepareRequest = prepareRequest; FrameControl = this; Reset(); } public string Scheme { get; set; } public string Method { get; set; } public string RequestUri { get; set; } public string Path { get; set; } public string QueryString { get; set; } public string HttpVersion { get; set; } public IHeaderDictionary RequestHeaders { get; set; } public Stream RequestBody { get; set; } public int StatusCode { get; set; } public string ReasonPhrase { get; set; } public IHeaderDictionary ResponseHeaders { get; set; } public Stream ResponseBody { get; set; } public Stream DuplexStream { get; set; } public CancellationToken RequestAborted { get; set; } public bool HasResponseStarted { get { return _responseStarted; } } public void Reset() { _onStarting = null; 
_onCompleted = null; _responseStarted = false; _keepAlive = false; _autoChunk = false; _applicationException = null; _requestHeaders.Reset(); ResetResponseHeaders(); ResetFeatureCollection(); Scheme = null; Method = null; RequestUri = null; Path = null; QueryString = null; HttpVersion = null; RequestHeaders = _requestHeaders; RequestBody = null; StatusCode = 200; ReasonPhrase = null; ResponseHeaders = _responseHeaders; ResponseBody = null; DuplexStream = null; var httpConnectionFeature = this as IHttpConnectionFeature; httpConnectionFeature.RemoteIpAddress = _remoteEndPoint?.Address; httpConnectionFeature.RemotePort = _remoteEndPoint?.Port ?? 0; httpConnectionFeature.LocalIpAddress = _localEndPoint?.Address; httpConnectionFeature.LocalPort = _localEndPoint?.Port ?? 0; if (_remoteEndPoint != null && _localEndPoint != null) { httpConnectionFeature.IsLocal = _remoteEndPoint.Address.Equals(_localEndPoint.Address); } else { httpConnectionFeature.IsLocal = false; } _prepareRequest?.Invoke(this); _requestAbortCts?.Dispose(); } public void ResetResponseHeaders() { _responseHeaders.Reset(); _responseHeaders.HeaderServer = "Kestrel"; _responseHeaders.HeaderDate = DateHeaderValueManager.GetDateHeaderValue(); } /// <summary> /// Called once by Connection class to begin the RequestProcessingAsync loop. /// </summary> public void Start() { if (!_requestProcessingStarted) { _requestProcessingStarted = true; _requestProcessingTask = Task.Run(RequestProcessingAsync); } } /// <summary> /// Should be called when the server wants to initiate a shutdown. The Task returned will /// become complete when the RequestProcessingAsync function has exited. It is expected that /// Stop will be called on all active connections, and Task.WaitAll() will be called on every /// return value. /// </summary> public Task Stop() { if (!_requestProcessingStopping) { _requestProcessingStopping = true; } return _requestProcessingTask ?? TaskUtilities.CompletedTask; } /// <summary> /// Immediately kill the connection and poison the request and response streams. /// </summary> public void Abort() { _requestProcessingStopping = true; _requestAborted = true; _requestBody?.Abort(); _responseBody?.Abort(); try { ConnectionControl.End(ProduceEndType.SocketDisconnect); SocketInput.AbortAwaiting(); _disconnectCts.Cancel(); } catch (Exception ex) { Log.LogError("Abort", ex); } } /// <summary> /// Primary loop which consumes socket input, parses it for protocol framing, and invokes the /// application delegate for as long as the socket is intended to remain open. /// The resulting Task from this loop is preserved in a field which is used when the server needs /// to drain and close all currently active connections.
/// </summary> public async Task RequestProcessingAsync() { try { var terminated = false; while (!terminated && !_requestProcessingStopping) { while (!terminated && !_requestProcessingStopping && !TakeStartLine(SocketInput)) { terminated = SocketInput.RemoteIntakeFin; if (!terminated) { await SocketInput; } } while (!terminated && !_requestProcessingStopping && !TakeMessageHeaders(SocketInput, _requestHeaders)) { terminated = SocketInput.RemoteIntakeFin; if (!terminated) { await SocketInput; } } if (!terminated && !_requestProcessingStopping) { var messageBody = MessageBody.For(HttpVersion, _requestHeaders, this); _keepAlive = messageBody.RequestKeepAlive; _requestBody = new FrameRequestStream(messageBody); RequestBody = _requestBody; _responseBody = new FrameResponseStream(this); ResponseBody = _responseBody; DuplexStream = new FrameDuplexStream(RequestBody, ResponseBody); _requestAbortCts = CancellationTokenSource.CreateLinkedTokenSource(_disconnectCts.Token); RequestAborted = _requestAbortCts.Token; var httpContext = HttpContextFactory.Create(this); try { await Application.Invoke(httpContext).ConfigureAwait(false); } catch (Exception ex) { ReportApplicationError(ex); } finally { // Trigger OnStarting if it hasn't been called yet and the app hasn't // already failed. If an OnStarting callback throws we can go through // our normal error handling in ProduceEnd. // https://github.com/aspnet/KestrelHttpServer/issues/43 if (!_responseStarted && _applicationException == null) { await FireOnStarting(); } await FireOnCompleted(); HttpContextFactory.Dispose(httpContext); // If _requestAbort is set, the connection has already been closed. if (!_requestAborted) { await ProduceEnd(); // Finish reading the request body in case the app did not. await messageBody.Consume(); } _requestBody.StopAcceptingReads(); _responseBody.StopAcceptingWrites(); } terminated = !_keepAlive; } Reset(); } } catch (Exception ex) { Log.LogWarning("Connection processing ended abnormally", ex); } finally { try { _disconnectCts.Dispose(); // If _requestAborted is set, the connection has already been closed. 
if (!_requestAborted) { // Inform client no more data will ever arrive ConnectionControl.End(ProduceEndType.SocketShutdownSend); // Wait for client to either disconnect or send unexpected data await SocketInput; // Dispose socket ConnectionControl.End(ProduceEndType.SocketDisconnect); } } catch (Exception ex) { Log.LogWarning("Connection shutdown abnormally", ex); } } } public void OnStarting(Func<object, Task> callback, object state) { lock (_onStartingSync) { if (_onStarting == null) { _onStarting = new List<KeyValuePair<Func<object, Task>, object>>(); } _onStarting.Add(new KeyValuePair<Func<object, Task>, object>(callback, state)); } } public void OnCompleted(Func<object, Task> callback, object state) { lock (_onCompletedSync) { if (_onCompleted == null) { _onCompleted = new List<KeyValuePair<Func<object, Task>, object>>(); } _onCompleted.Add(new KeyValuePair<Func<object, Task>, object>(callback, state)); } } private async Task FireOnStarting() { List<KeyValuePair<Func<object, Task>, object>> onStarting = null; lock (_onStartingSync) { onStarting = _onStarting; _onStarting = null; } if (onStarting != null) { try { foreach (var entry in onStarting) { await entry.Key.Invoke(entry.Value); } } catch (Exception ex) { ReportApplicationError(ex); } } } private async Task FireOnCompleted() { List<KeyValuePair<Func<object, Task>, object>> onCompleted = null; lock (_onCompletedSync) { onCompleted = _onCompleted; _onCompleted = null; } if (onCompleted != null) { foreach (var entry in onCompleted) { try { await entry.Key.Invoke(entry.Value); } catch (Exception ex) { ReportApplicationError(ex); } } } } public void Flush() { ProduceStartAndFireOnStarting(immediate: false).GetAwaiter().GetResult(); SocketOutput.Write(_emptyData, immediate: true); } public async Task FlushAsync(CancellationToken cancellationToken) { await ProduceStartAndFireOnStarting(immediate: false); await SocketOutput.WriteAsync(_emptyData, immediate: true, cancellationToken: cancellationToken); } public void Write(ArraySegment<byte> data) { ProduceStartAndFireOnStarting(immediate: false).GetAwaiter().GetResult(); if (_autoChunk) { if (data.Count == 0) { return; } WriteChunked(data); } else { SocketOutput.Write(data, immediate: true); } } public async Task WriteAsync(ArraySegment<byte> data, CancellationToken cancellationToken) { await ProduceStartAndFireOnStarting(immediate: false); if (_autoChunk) { if (data.Count == 0) { return; } await WriteChunkedAsync(data, cancellationToken); } else { await SocketOutput.WriteAsync(data, immediate: true, cancellationToken: cancellationToken); } } private void WriteChunked(ArraySegment<byte> data) { SocketOutput.Write(BeginChunkBytes(data.Count), immediate: false); SocketOutput.Write(data, immediate: false); SocketOutput.Write(_endChunkBytes, immediate: true); } private async Task WriteChunkedAsync(ArraySegment<byte> data, CancellationToken cancellationToken) { await SocketOutput.WriteAsync(BeginChunkBytes(data.Count), immediate: false, cancellationToken: cancellationToken); await SocketOutput.WriteAsync(data, immediate: false, cancellationToken: cancellationToken); await SocketOutput.WriteAsync(_endChunkBytes, immediate: true, cancellationToken: cancellationToken); } public static ArraySegment<byte> BeginChunkBytes(int dataCount) { var bytes = new byte[10] { _hex[((dataCount >> 0x1c) & 0x0f)], _hex[((dataCount >> 0x18) & 0x0f)], _hex[((dataCount >> 0x14) & 0x0f)], _hex[((dataCount >> 0x10) & 0x0f)], _hex[((dataCount >> 0x0c) & 0x0f)], _hex[((dataCount >> 0x08) & 0x0f)], _hex[((dataCount >> 
0x04) & 0x0f)], _hex[((dataCount >> 0x00) & 0x0f)], (byte)'\r', (byte)'\n', }; // Determine the most-significant non-zero nibble int total, shift; total = (dataCount > 0xffff) ? 0x10 : 0x00; dataCount >>= total; shift = (dataCount > 0x00ff) ? 0x08 : 0x00; dataCount >>= shift; total |= shift; total |= (dataCount > 0x000f) ? 0x04 : 0x00; var offset = 7 - (total >> 2); return new ArraySegment<byte>(bytes, offset, 10 - offset); } private void WriteChunkedResponseSuffix() { SocketOutput.Write(_endChunkedResponseBytes, immediate: true); } private static ArraySegment<byte> CreateAsciiByteArraySegment(string text) { var bytes = Encoding.ASCII.GetBytes(text); return new ArraySegment<byte>(bytes); } public void ProduceContinue() { if (_responseStarted) return; StringValues expect; if (HttpVersion.Equals("HTTP/1.1") && RequestHeaders.TryGetValue("Expect", out expect) && (expect.FirstOrDefault() ?? "").Equals("100-continue", StringComparison.OrdinalIgnoreCase)) { SocketOutput.Write(_continueBytes); } } public async Task ProduceStartAndFireOnStarting(bool immediate = true) { if (_responseStarted) return; await FireOnStarting(); if (_applicationException != null) { throw new ObjectDisposedException( "The response has been aborted due to an unhandled application exception.", _applicationException); } await ProduceStart(immediate, appCompleted: false); } private async Task ProduceStart(bool immediate, bool appCompleted) { if (_responseStarted) return; _responseStarted = true; var status = ReasonPhrases.ToStatus(StatusCode, ReasonPhrase); var responseHeader = CreateResponseHeader(status, appCompleted); using (responseHeader.Item2) { await SocketOutput.WriteAsync(responseHeader.Item1, immediate: immediate); } } private async Task ProduceEnd() { if (_applicationException != null) { if (_responseStarted) { // We can no longer respond with a 500, so we simply close the connection. _requestProcessingStopping = true; return; } else { StatusCode = 500; ReasonPhrase = null; ResetResponseHeaders(); _responseHeaders.HeaderContentLength = "0"; } } await ProduceStart(immediate: true, appCompleted: true); // _autoChunk should be checked after we are sure ProduceStart() has been called // since ProduceStart() may set _autoChunk to true. 
if (_autoChunk) { WriteChunkedResponseSuffix(); } if (_keepAlive) { ConnectionControl.End(ProduceEndType.ConnectionKeepAlive); } } private Tuple<ArraySegment<byte>, IDisposable> CreateResponseHeader( string status, bool appCompleted) { var writer = new MemoryPoolTextWriter(Memory); writer.Write(HttpVersion); writer.Write(' '); writer.Write(status); writer.Write('\r'); writer.Write('\n'); var hasConnection = false; var hasTransferEncoding = false; var hasContentLength = false; foreach (var header in _responseHeaders) { var isConnection = false; if (!hasConnection && string.Equals(header.Key, "Connection", StringComparison.OrdinalIgnoreCase)) { hasConnection = isConnection = true; } else if (!hasTransferEncoding && string.Equals(header.Key, "Transfer-Encoding", StringComparison.OrdinalIgnoreCase)) { hasTransferEncoding = true; } else if (!hasContentLength && string.Equals(header.Key, "Content-Length", StringComparison.OrdinalIgnoreCase)) { hasContentLength = true; } foreach (var value in header.Value) { writer.Write(header.Key); writer.Write(':'); writer.Write(' '); writer.Write(value); writer.Write('\r'); writer.Write('\n'); if (isConnection && value.IndexOf("close", StringComparison.OrdinalIgnoreCase) != -1) { _keepAlive = false; } } } if (_keepAlive && !hasTransferEncoding && !hasContentLength) { if (appCompleted) { // Don't set the Content-Length or Transfer-Encoding headers // automatically for HEAD requests or 101, 204, 205, 304 responses. if (Method != "HEAD" && StatusCanHaveBody(StatusCode)) { // Since the app has completed and we are only now generating // the headers we can safely set the Content-Length to 0. writer.Write("Content-Length: 0\r\n"); } } else { if (HttpVersion == "HTTP/1.1") { _autoChunk = true; writer.Write("Transfer-Encoding: chunked\r\n"); } else { _keepAlive = false; } } } if (_keepAlive == false && hasConnection == false && HttpVersion == "HTTP/1.1") { writer.Write("Connection: close\r\n\r\n"); } else if (_keepAlive && hasConnection == false && HttpVersion == "HTTP/1.0") { writer.Write("Connection: keep-alive\r\n\r\n"); } else { writer.Write('\r'); writer.Write('\n'); } writer.Flush(); return new Tuple<ArraySegment<byte>, IDisposable>(writer.Buffer, writer); } private bool TakeStartLine(SocketInput input) { var scan = input.ConsumingStart(); var consumed = scan; try { var begin = scan; if (scan.Seek(' ') == -1) { return false; } var method = begin.GetAsciiString(scan); scan.Take(); begin = scan; var needDecode = false; var chFound = scan.Seek(' ', '?', '%'); if (chFound == '%') { needDecode = true; chFound = scan.Seek(' ', '?'); } var pathBegin = begin; var pathEnd = scan; var queryString = ""; if (chFound == '?') { begin = scan; if (scan.Seek(' ') != ' ') { return false; } queryString = begin.GetAsciiString(scan); } scan.Take(); begin = scan; if (scan.Seek('\r') == -1) { return false; } var httpVersion = begin.GetAsciiString(scan); scan.Take(); if (scan.Take() != '\n') { return false; } // URIs are always encoded/escaped to ASCII https://tools.ietf.org/html/rfc3986#page-11 // Multibyte Internationalized Resource Identifiers (IRIs) are first converted to utf8; // then encoded/escaped to ASCII https://www.ietf.org/rfc/rfc3987.txt "Mapping of IRIs to URIs" string requestUrlPath; if (needDecode) { // URI was encoded, unescape and then parse as utf8 pathEnd = UrlPathDecoder.Unescape(pathBegin, pathEnd); requestUrlPath = pathBegin.GetUtf8String(pathEnd); } else { // URI wasn't encoded, parse as ASCII requestUrlPath = pathBegin.GetAsciiString(pathEnd); } consumed = 
scan; Method = method; RequestUri = requestUrlPath; QueryString = queryString; HttpVersion = httpVersion; Path = RequestUri; return true; } finally { input.ConsumingComplete(consumed, scan); } } public static bool TakeMessageHeaders(SocketInput input, FrameRequestHeaders requestHeaders) { var scan = input.ConsumingStart(); var consumed = scan; try { int chFirst; int chSecond; while (!scan.IsEnd) { var beginName = scan; scan.Seek(':', '\r'); var endName = scan; chFirst = scan.Take(); var beginValue = scan; chSecond = scan.Take(); if (chFirst == -1 || chSecond == -1) { return false; } if (chFirst == '\r') { if (chSecond == '\n') { consumed = scan; return true; } throw new InvalidDataException("Malformed request"); } while ( chSecond == ' ' || chSecond == '\t' || chSecond == '\r' || chSecond == '\n') { if (chSecond == '\r') { var scanAhead = scan; var chAhead = scanAhead.Take(); if (chAhead == '\n') { chAhead = scanAhead.Take(); // If the "\r\n" isn't part of "linear whitespace", // then this header has no value. if (chAhead != ' ' && chAhead != '\t') { break; } } } beginValue = scan; chSecond = scan.Take(); } scan = beginValue; var wrapping = false; while (!scan.IsEnd) { if (scan.Seek('\r') == -1) { // no "\r" in sight, burn used bytes and go back to await more data return false; } var endValue = scan; chFirst = scan.Take(); // expecting: /r chSecond = scan.Take(); // expecting: /n if (chSecond != '\n') { // "\r" was all by itself, move just after it and try again scan = endValue; scan.Take(); continue; } var chThird = scan.Peek(); if (chThird == ' ' || chThird == '\t') { // special case, "\r\n " or "\r\n\t". // this is considered wrapping"linear whitespace" and is actually part of the header value // continue past this for the next wrapping = true; continue; } var name = beginName.GetArraySegment(endName); var value = beginValue.GetAsciiString(endValue); if (wrapping) { value = value.Replace("\r\n", " "); } consumed = scan; requestHeaders.Append(name.Array, name.Offset, name.Count, value); break; } } return false; } finally { input.ConsumingComplete(consumed, scan); } } public bool StatusCanHaveBody(int statusCode) { // List of status codes taken from Microsoft.Net.Http.Server.Response return statusCode != 101 && statusCode != 204 && statusCode != 205 && statusCode != 304; } private void ReportApplicationError(Exception ex) { _applicationException = ex; Log.ApplicationError(ex); } } }
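The BeginChunkBytes helper in the file above writes the chunk length as eight big-endian hex nibbles, then finds the most significant non-zero nibble with a shift/or cascade rather than a loop. A self-contained C++ rendering of the same arithmetic, handy for sanity-checking the offset computation (a sketch of the idea, not the Kestrel code itself):

#include <cstdio>
#include <string>

static const char HEX[] = "0123456789abcdef";

// Chunk-size prefix for HTTP/1.1 chunked encoding: hex digits with no
// leading zeros, terminated by CRLF.
std::string beginChunk(unsigned int n) {
    char buf[10];
    for (int i = 0; i < 8; ++i)                      // all 8 nibbles, big-endian
        buf[i] = HEX[(n >> (28 - 4 * i)) & 0xf];
    buf[8] = '\r'; buf[9] = '\n';

    // Binary search for the most significant non-zero nibble,
    // mirroring the C# shift/or trick.
    int total = (n > 0xffff) ? 0x10 : 0x00; n >>= total;
    int shift = (n > 0x00ff) ? 0x08 : 0x00; n >>= shift; total |= shift;
    total |= (n > 0x000f) ? 0x04 : 0x00;

    int offset = 7 - (total >> 2);                   // index of the first kept digit
    return std::string(buf + offset, 10 - offset);
}

int main() {
    std::printf("%s", beginChunk(26).c_str()); // prints "1a\r\n"
    std::printf("%s", beginChunk(0).c_str());  // prints "0\r\n"
}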
1
6,726
Should we move these out of Frame into a static class? Similar to reason phrases?
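One shape the refactor asked about above could take, sketched in C++ since the idea is language-neutral (the holder and constant names are hypothetical, by analogy with Kestrel's ReasonPhrases): the pre-encoded fragments move out of Frame into one shared static holder, and response writers memcpy them instead of re-encoding ASCII text per response:

#include <cstddef>
#include <cstdio>
#include <cstring>

namespace HttpConstants {
    // Encoded once at compile time, shared by every connection's writer;
    // the Connection/Transfer-Encoding fragments would live here too.
    constexpr char kHttp11[] = "HTTP/1.1 ";
    constexpr char kEndHeaders[] = "\r\n\r\n";
}

int main() {
    char out[64];
    std::size_t n = 0;
    std::memcpy(out + n, HttpConstants::kHttp11, sizeof(HttpConstants::kHttp11) - 1);
    n += sizeof(HttpConstants::kHttp11) - 1;
    std::memcpy(out + n, "200 OK", 6);
    n += 6;
    std::memcpy(out + n, HttpConstants::kEndHeaders, sizeof(HttpConstants::kEndHeaders) - 1);
    n += sizeof(HttpConstants::kEndHeaders) - 1;
    std::fwrite(out, 1, n, stdout); // "HTTP/1.1 200 OK" followed by a blank line
}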
aspnet-KestrelHttpServer
.cs
@@ -608,7 +608,7 @@ std::vector<RouteStep> buildIntersections(std::vector<RouteStep> steps) { auto &step = steps[step_index]; const auto instruction = step.maneuver.instruction; - if (instruction.type == TurnType::Suppressed) + if (instruction.type == TurnType::Suppressed || instruction.type == TurnType::UseLane) { BOOST_ASSERT(steps[last_valid_instruction].mode == step.mode); // count intersections. We cannot use exit, since intersections can follow directly
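A reduced model of what this one-line change affects: buildIntersections folds "silent" steps into the last announced maneuver when counting intersections, and the patch widens "silent" from Suppressed alone to Suppressed-or-UseLane. The types below are simplified stand-ins for OSRM's RouteStep machinery, not the real interface:

#include <cstddef>
#include <iostream>
#include <vector>

enum class TurnType { Turn, Suppressed, UseLane };

struct Step {
    TurnType type;
    int intersections; // intersections passed during this step
};

// Fold each silent step's intersections into the preceding announced step.
std::vector<Step> buildIntersections(std::vector<Step> steps) {
    std::size_t last_valid = 0;
    for (std::size_t i = 1; i < steps.size(); ++i) {
        const bool silent = steps[i].type == TurnType::Suppressed ||
                            steps[i].type == TurnType::UseLane; // <- the patched condition
        if (silent) {
            steps[last_valid].intersections += steps[i].intersections;
            steps[i].intersections = 0;
        } else {
            last_valid = i;
        }
    }
    return steps;
}

int main() {
    auto out = buildIntersections(
        {{TurnType::Turn, 1}, {TurnType::UseLane, 2}, {TurnType::Turn, 1}});
    std::cout << out[0].intersections << "\n"; // 3: the UseLane step now counts toward the turn before it
}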
1
#include "engine/guidance/post_processing.hpp" #include "extractor/guidance/constants.hpp" #include "extractor/guidance/turn_instruction.hpp" #include "engine/guidance/assemble_steps.hpp" #include "engine/guidance/lane_processing.hpp" #include "engine/guidance/collapsing_utility.hpp" #include "util/bearing.hpp" #include "util/guidance/name_announcements.hpp" #include "util/guidance/turn_lanes.hpp" #include <boost/assert.hpp> #include <boost/numeric/conversion/cast.hpp> #include <boost/range/iterator_range.hpp> #include <algorithm> #include <cmath> #include <cstddef> #include <limits> #include <utility> using osrm::util::angularDeviation; using osrm::extractor::guidance::getTurnDirection; using osrm::extractor::guidance::hasRampType; using osrm::extractor::guidance::mirrorDirectionModifier; using osrm::extractor::guidance::bearingToDirectionModifier; namespace osrm { namespace engine { namespace guidance { namespace { void fixFinalRoundabout(std::vector<RouteStep> &steps) { for (std::size_t propagation_index = steps.size() - 1; propagation_index > 0; --propagation_index) { auto &propagation_step = steps[propagation_index]; propagation_step.maneuver.exit = 0; if (entersRoundabout(propagation_step.maneuver.instruction)) { // remember the current name as rotary name in tha case we end in a rotary if (propagation_step.maneuver.instruction.type == TurnType::EnterRotary || propagation_step.maneuver.instruction.type == TurnType::EnterRotaryAtExit) { propagation_step.rotary_name = propagation_step.name; propagation_step.rotary_pronunciation = propagation_step.pronunciation; } else if (propagation_step.maneuver.instruction.type == TurnType::EnterRoundaboutIntersection || propagation_step.maneuver.instruction.type == TurnType::EnterRoundaboutIntersectionAtExit) { propagation_step.maneuver.instruction.type = TurnType::EnterRoundabout; } return; } // accumulate turn data into the enter instructions else if (propagation_step.maneuver.instruction.type == TurnType::StayOnRoundabout) { // TODO this operates on the data that is in the instructions. // We are missing out on the final segment after the last stay-on-roundabout // instruction though. it is not contained somewhere until now steps[propagation_index - 1].ElongateBy(propagation_step); steps[propagation_index - 1].maneuver.exit = propagation_step.maneuver.exit; propagation_step.Invalidate(); } } } bool setUpRoundabout(RouteStep &step) { // basic entry into a roundabout // Special case handling, if an entry is directly tied to an exit const auto instruction = step.maneuver.instruction; if (instruction.type == TurnType::EnterRotaryAtExit || instruction.type == TurnType::EnterRoundaboutAtExit || instruction.type == TurnType::EnterRoundaboutIntersectionAtExit) { // Here we consider an actual entry, not an exit. We simply have to count the additional // exit step.maneuver.exit = 1; // prevent futher special case handling of these two. if (instruction.type == TurnType::EnterRotaryAtExit) step.maneuver.instruction.type = TurnType::EnterRotary; else if (instruction.type == TurnType::EnterRoundaboutAtExit) step.maneuver.instruction.type = TurnType::EnterRoundabout; else step.maneuver.instruction.type = TurnType::EnterRoundaboutIntersection; } if (leavesRoundabout(instruction)) { // This set-up, even though it looks the same, is actually looking at entering AND exiting step.maneuver.exit = 1; // count the otherwise missing exit // prevent futher special case handling of these two. 
if (instruction.type == TurnType::EnterAndExitRotary) step.maneuver.instruction.type = TurnType::EnterRotary; else if (instruction.type == TurnType::EnterAndExitRoundabout) step.maneuver.instruction.type = TurnType::EnterRoundabout; else step.maneuver.instruction.type = TurnType::EnterRoundaboutIntersection; return false; } else { return true; } } void closeOffRoundabout(const bool on_roundabout, std::vector<RouteStep> &steps, std::size_t step_index) { auto &step = steps[step_index]; step.maneuver.exit += 1; if (!on_roundabout) { BOOST_ASSERT(steps.size() >= 2); // We reached a special case that requires the addition of a special route step in the // beginning. We started in a roundabout, so to announce the exit, we move use the exit // instruction and move it right to the beginning to make sure to immediately announce the // exit. BOOST_ASSERT(leavesRoundabout(steps[1].maneuver.instruction) || steps[1].maneuver.instruction.type == TurnType::StayOnRoundabout || steps[1].maneuver.instruction.type == TurnType::Suppressed || steps[1].maneuver.instruction.type == TurnType::NoTurn || steps[1].maneuver.instruction.type == TurnType::UseLane); steps[0].geometry_end = 1; steps[1].geometry_begin = 0; steps[1].AddInFront(steps[0]); steps[1].intersections.erase(steps[1].intersections.begin()); // otherwise we copy the // source if (leavesRoundabout(steps[1].maneuver.instruction)) steps[1].maneuver.exit = 1; steps[0].duration = 0; steps[0].distance = 0; const auto exitToEnter = [](const TurnType::Enum type) { if (TurnType::ExitRotary == type) return TurnType::EnterRotary; // if we do not enter the roundabout Intersection, we cannot treat the full traversal as // a turn. So we switch it up to the roundabout type else if (type == TurnType::ExitRoundaboutIntersection) return TurnType::EnterRoundabout; else return TurnType::EnterRoundabout; }; steps[1].maneuver.instruction.type = exitToEnter(step.maneuver.instruction.type); if (steps[1].maneuver.instruction.type == TurnType::EnterRotary) { steps[1].rotary_name = steps[0].name; steps[1].rotary_pronunciation = steps[0].pronunciation; } } if (step_index > 1) { auto &exit_step = steps[step_index]; auto &prev_step = steps[step_index - 1]; // In case the step with the roundabout exit instruction cannot be merged with the // previous step we change the instruction to a normal turn if (!guidance::haveSameMode(exit_step, prev_step)) { BOOST_ASSERT(leavesRoundabout(exit_step.maneuver.instruction)); if (!entersRoundabout(prev_step.maneuver.instruction)) { prev_step.maneuver.instruction = exit_step.maneuver.instruction; } prev_step.maneuver.exit = exit_step.maneuver.exit; exit_step.maneuver.instruction.type = TurnType::Notification; step_index--; } } // Normal exit from the roundabout, or exit from a previously fixed roundabout. Propagate the // index back to the entering location and prepare the current silent set of instructions for // removal. std::vector<std::size_t> intermediate_steps; BOOST_ASSERT(!steps[step_index].intersections.empty()); // the very first intersection in the steps represents the location of the turn. 
Following // intersections are locations passed along the way const auto exit_intersection = steps[step_index].intersections.front(); const auto exit_bearing = exit_intersection.bearings[exit_intersection.out]; const auto destination_copy = step; if (step_index > 1) { // The very first route-step is head, so we cannot iterate past that one for (std::size_t propagation_index = step_index - 1; propagation_index > 0; --propagation_index) { auto &propagation_step = steps[propagation_index]; auto &next_step = steps[propagation_index + 1]; if (guidance::haveSameMode(propagation_step, next_step)) { propagation_step.ElongateBy(next_step); propagation_step.maneuver.exit = next_step.maneuver.exit; next_step.Invalidate(); } if (entersRoundabout(propagation_step.maneuver.instruction)) { const auto entry_intersection = propagation_step.intersections.front(); // remember rotary name if (propagation_step.maneuver.instruction.type == TurnType::EnterRotary || propagation_step.maneuver.instruction.type == TurnType::EnterRotaryAtExit) { propagation_step.rotary_name = propagation_step.name; propagation_step.rotary_pronunciation = propagation_step.pronunciation; } else if (propagation_step.maneuver.instruction.type == TurnType::EnterRoundaboutIntersection || propagation_step.maneuver.instruction.type == TurnType::EnterRoundaboutIntersectionAtExit) { BOOST_ASSERT(!propagation_step.intersections.empty()); const double angle = util::bearing::angleBetween( util::bearing::reverse(entry_intersection.bearings[entry_intersection.in]), exit_bearing); auto bearings = propagation_step.intersections.front().bearings; propagation_step.maneuver.instruction.direction_modifier = getTurnDirection(angle); } propagation_step.AdaptStepSignage(destination_copy); break; } } // remove exit } } } // namespace // Every Step Maneuver consists of the information until the turn. // This list contains a set of instructions, called silent, which should // not be part of the final output. // They are required for maintenance purposes. We can calculate the number // of exits to pass in a roundabout and the number of intersections // that we come across. std::vector<RouteStep> postProcess(std::vector<RouteStep> steps) { // the steps should always include the first/last step in form of a location BOOST_ASSERT(steps.size() >= 2); if (steps.size() == 2) return steps; // Count Street Exits forward bool on_roundabout = false; bool has_entered_roundabout = false; // count the exits forward. if enter/exit roundabout happen both, no further treatment is // required. We might end up with only one of them (e.g. starting within a roundabout) // or having a via-point in the roundabout. // In this case, exits are numbered from the start of the leg. 
for (std::size_t step_index = 0; step_index < steps.size(); ++step_index) { const auto next_step_index = step_index + 1; auto &step = steps[step_index]; const auto instruction = step.maneuver.instruction; if (entersRoundabout(instruction)) { has_entered_roundabout = setUpRoundabout(step); if (has_entered_roundabout && next_step_index < steps.size()) steps[next_step_index].maneuver.exit = step.maneuver.exit; } else if (instruction.type == TurnType::StayOnRoundabout) { on_roundabout = true; // increase the exit number we require passing the exit step.maneuver.exit += 1; if (next_step_index < steps.size()) steps[next_step_index].maneuver.exit = step.maneuver.exit; } else if (leavesRoundabout(instruction)) { // if (!has_entered_roundabout) // in case the we are not on a roundabout, the very first instruction // after the depart will be transformed into a roundabout and become // the first valid instruction closeOffRoundabout(has_entered_roundabout, steps, step_index); has_entered_roundabout = false; on_roundabout = false; } else if (on_roundabout && next_step_index < steps.size()) { steps[next_step_index].maneuver.exit = step.maneuver.exit; } } // unterminated roundabout // Move backwards through the instructions until the start and remove the exit number // A roundabout without exit translates to enter-roundabout if (has_entered_roundabout || on_roundabout) { fixFinalRoundabout(steps); } BOOST_ASSERT(steps.front().intersections.size() >= 1); BOOST_ASSERT(steps.front().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.front().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.front().maneuver.waypoint_type == WaypointType::Depart); BOOST_ASSERT(steps.back().intersections.size() == 1); BOOST_ASSERT(steps.back().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.back().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.back().maneuver.waypoint_type == WaypointType::Arrive); return removeNoTurnInstructions(std::move(steps)); } // Doing this step in post-processing provides a few challenges we cannot overcome. // The removal of an initial step imposes some copy overhead in the steps, moving all later // steps to the front. In addition, we cannot reduce the travel time that is accumulated at a // different location. // As a direct implication, we have to keep the time of the initial/final turns (which adds a // few seconds of inaccuracy at both ends. This is acceptable, however, since the turn should // usually not be as relevant. void trimShortSegments(std::vector<RouteStep> &steps, LegGeometry &geometry) { if (steps.size() < 2 || geometry.locations.size() <= 2) return; // if phantom node is located at the connection of two segments, either one can be selected // as // turn // // a --- b // | // c // // If a route from b to c is requested, both a--b and b--c could be selected as start // segment. // In case of a--b, we end up with an unwanted turn saying turn-right onto b-c. // These cases start off with an initial segment which is of zero length. // We have to be careful though, since routing that starts in a roundabout has a valid. 
// To catch these cases correctly, we have to perform trimming prior to the post-processing BOOST_ASSERT(geometry.locations.size() >= steps.size()); // Look for distances under 1m const bool zero_length_step = steps.front().distance <= 1 && steps.size() > 2; const bool duplicated_coordinate = util::coordinate_calculation::haversineDistance( geometry.locations[0], geometry.locations[1]) <= 1; if (zero_length_step || duplicated_coordinate) { // remove the initial distance value geometry.segment_distances.erase(geometry.segment_distances.begin()); const auto offset = zero_length_step ? geometry.segment_offsets[1] : 1; if (offset > 0) { // fixup the coordinates/annotations/ids geometry.locations.erase(geometry.locations.begin(), geometry.locations.begin() + offset); geometry.annotations.erase(geometry.annotations.begin(), geometry.annotations.begin() + offset); geometry.osm_node_ids.erase(geometry.osm_node_ids.begin(), geometry.osm_node_ids.begin() + offset); } // We have to adjust the first step both for its name and the bearings if (zero_length_step) { // since we are not only checking for epsilon but for a full meter, we can have multiple // coordinates here. Move all offsets to the front and reduce by one. (This is an // inplace forward one and reduce by one) std::transform(geometry.segment_offsets.begin() + 1, geometry.segment_offsets.end(), geometry.segment_offsets.begin(), [offset](const std::size_t val) { return val - offset; }); geometry.segment_offsets.pop_back(); const auto &current_depart = steps.front(); auto &designated_depart = *(steps.begin() + 1); // FIXME this is required to be consistent with the route durations. The initial // turn is not actually part of the route, though designated_depart.duration += current_depart.duration; // update initial turn direction/bearings. Due to the duplicated first coordinate, // the initial bearing is invalid designated_depart.maneuver.waypoint_type = WaypointType::Depart; designated_depart.maneuver.bearing_before = 0; designated_depart.maneuver.instruction = TurnInstruction::NO_TURN(); // we need to make this conform with the intersection format for the first intersection auto &first_intersection = designated_depart.intersections.front(); designated_depart.intersections.front().lanes = util::guidance::LaneTuple(); designated_depart.intersections.front().lane_description.clear(); first_intersection.bearings = {first_intersection.bearings[first_intersection.out]}; first_intersection.entry = {true}; first_intersection.in = IntermediateIntersection::NO_INDEX; first_intersection.out = 0; // finally remove the initial (now duplicated move) steps.erase(steps.begin()); } else { // we need to make this at least 1 because we will substract 1 // from all offsets at the end of the loop. 
steps.front().geometry_begin = 1; // reduce all offsets by one (inplace) std::transform(geometry.segment_offsets.begin(), geometry.segment_offsets.end(), geometry.segment_offsets.begin(), [](const std::size_t val) { return val - 1; }); } // and update the leg geometry indices for the removed entry std::for_each(steps.begin(), steps.end(), [offset](RouteStep &step) { step.geometry_begin -= offset; step.geometry_end -= offset; }); auto &first_step = steps.front(); // we changed the geometry, we need to recalculate the bearing auto bearing = std::round(util::coordinate_calculation::bearing( geometry.locations[first_step.geometry_begin], geometry.locations[first_step.geometry_begin + 1])); first_step.maneuver.bearing_after = bearing; first_step.intersections.front().bearings.front() = bearing; } BOOST_ASSERT(steps.front().intersections.size() >= 1); BOOST_ASSERT(steps.front().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.front().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.front().maneuver.waypoint_type == WaypointType::Depart); BOOST_ASSERT(steps.back().intersections.size() == 1); BOOST_ASSERT(steps.back().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.back().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.back().maneuver.waypoint_type == WaypointType::Arrive); // make sure we still have enough segments if (steps.size() < 2 || geometry.locations.size() == 2) return; BOOST_ASSERT(geometry.locations.size() >= steps.size()); auto &next_to_last_step = *(steps.end() - 2); // in the end, the situation with the roundabout cannot occur. As a result, we can remove // all zero-length instructions if (next_to_last_step.distance <= 1 && steps.size() > 2) { geometry.segment_offsets.pop_back(); // remove all the last coordinates from the geometry geometry.locations.resize(geometry.segment_offsets.back() + 1); geometry.annotations.resize(geometry.segment_offsets.back() + 1); geometry.osm_node_ids.resize(geometry.segment_offsets.back() + 1); BOOST_ASSERT(geometry.segment_distances.back() <= 1); geometry.segment_distances.pop_back(); next_to_last_step.maneuver.waypoint_type = WaypointType::Arrive; next_to_last_step.maneuver.instruction = TurnInstruction::NO_TURN(); next_to_last_step.maneuver.bearing_after = 0; next_to_last_step.intersections.front().lanes = util::guidance::LaneTuple(); next_to_last_step.intersections.front().lane_description.clear(); next_to_last_step.geometry_end = next_to_last_step.geometry_begin + 1; BOOST_ASSERT(next_to_last_step.intersections.size() == 1); auto &last_intersection = next_to_last_step.intersections.back(); last_intersection.bearings = {last_intersection.bearings[last_intersection.in]}; last_intersection.entry = {true}; last_intersection.out = IntermediateIntersection::NO_INDEX; last_intersection.in = 0; steps.pop_back(); // Because we eliminated a really short segment, it was probably // near an intersection. The convention is *not* to make the // turn, so the `arrive` instruction should be on the same road // as the segment before it. Thus, we have to copy the names // and travel modes from the new next_to_last step. 
auto &new_next_to_last = *(steps.end() - 2); next_to_last_step.AdaptStepSignage(new_next_to_last); next_to_last_step.mode = new_next_to_last.mode; next_to_last_step.classes = new_next_to_last.classes; // the geometry indices of the last step are already correct; } else if (util::coordinate_calculation::haversineDistance( geometry.locations[geometry.locations.size() - 2], geometry.locations[geometry.locations.size() - 1]) <= 1) { // correct steps but duplicated coordinate in the end. // This can happen if the last coordinate snaps to a node in the unpacked geometry geometry.locations.pop_back(); geometry.annotations.pop_back(); geometry.osm_node_ids.pop_back(); geometry.segment_offsets.back()--; // since the last geometry includes the location of arrival, the arrival instruction // geometry overlaps with the previous segment BOOST_ASSERT(next_to_last_step.geometry_end == steps.back().geometry_begin + 1); BOOST_ASSERT(next_to_last_step.geometry_begin < next_to_last_step.geometry_end); next_to_last_step.geometry_end--; auto &last_step = steps.back(); last_step.geometry_begin--; last_step.geometry_end--; BOOST_ASSERT(next_to_last_step.geometry_end == last_step.geometry_begin + 1); BOOST_ASSERT(last_step.geometry_begin == last_step.geometry_end - 1); BOOST_ASSERT(next_to_last_step.geometry_end >= 2); // we changed the geometry, we need to recalculate the bearing auto bearing = std::round(util::coordinate_calculation::bearing( geometry.locations[next_to_last_step.geometry_end - 2], geometry.locations[last_step.geometry_begin])); last_step.maneuver.bearing_before = bearing; last_step.intersections.front().bearings.front() = util::bearing::reverse(bearing); } BOOST_ASSERT(steps.back().geometry_end == geometry.locations.size()); BOOST_ASSERT(steps.front().intersections.size() >= 1); BOOST_ASSERT(steps.front().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.front().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.front().maneuver.waypoint_type == WaypointType::Depart); BOOST_ASSERT(steps.back().intersections.size() == 1); BOOST_ASSERT(steps.back().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.back().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.back().maneuver.waypoint_type == WaypointType::Arrive); } // assign relative locations to depart/arrive instructions std::vector<RouteStep> assignRelativeLocations(std::vector<RouteStep> steps, const LegGeometry &leg_geometry, const PhantomNode &source_node, const PhantomNode &target_node) { // We report the relative position of source/target to the road only within a range that is // sufficiently different but not full of the path BOOST_ASSERT(steps.size() >= 2); BOOST_ASSERT(leg_geometry.locations.size() >= 2); const constexpr double MINIMAL_RELATIVE_DISTANCE = 5., MAXIMAL_RELATIVE_DISTANCE = 300.; const auto distance_to_start = util::coordinate_calculation::haversineDistance( source_node.input_location, leg_geometry.locations[0]); const auto initial_modifier = distance_to_start >= MINIMAL_RELATIVE_DISTANCE && distance_to_start <= MAXIMAL_RELATIVE_DISTANCE ? 
bearingToDirectionModifier(util::coordinate_calculation::computeAngle( source_node.input_location, leg_geometry.locations[0], leg_geometry.locations[1])) : extractor::guidance::DirectionModifier::UTurn; steps.front().maneuver.instruction.direction_modifier = initial_modifier; const auto distance_from_end = util::coordinate_calculation::haversineDistance( target_node.input_location, leg_geometry.locations.back()); const auto final_modifier = distance_from_end >= MINIMAL_RELATIVE_DISTANCE && distance_from_end <= MAXIMAL_RELATIVE_DISTANCE ? bearingToDirectionModifier(util::coordinate_calculation::computeAngle( leg_geometry.locations[leg_geometry.locations.size() - 2], leg_geometry.locations[leg_geometry.locations.size() - 1], target_node.input_location)) : extractor::guidance::DirectionModifier::UTurn; steps.back().maneuver.instruction.direction_modifier = final_modifier; BOOST_ASSERT(steps.front().intersections.size() >= 1); BOOST_ASSERT(steps.front().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.front().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.front().maneuver.waypoint_type == WaypointType::Depart); BOOST_ASSERT(steps.back().intersections.size() == 1); BOOST_ASSERT(steps.back().intersections.front().bearings.size() == 1); BOOST_ASSERT(steps.back().intersections.front().entry.size() == 1); BOOST_ASSERT(steps.back().maneuver.waypoint_type == WaypointType::Arrive); return steps; } LegGeometry resyncGeometry(LegGeometry leg_geometry, const std::vector<RouteStep> &steps) { // The geometry uses an adjacency array-like structure for representation. // To sync it back up with the steps, we cann add a segment for every step. leg_geometry.segment_offsets.clear(); leg_geometry.segment_distances.clear(); leg_geometry.segment_offsets.push_back(0); for (const auto &step : steps) { leg_geometry.segment_distances.push_back(step.distance); // the leg geometry does not follow the begin/end-convetion. So we have to subtract one // to get the back-index. leg_geometry.segment_offsets.push_back(step.geometry_end - 1); } // remove the data from the reached-target step again leg_geometry.segment_offsets.pop_back(); leg_geometry.segment_distances.pop_back(); return leg_geometry; } std::vector<RouteStep> buildIntersections(std::vector<RouteStep> steps) { std::size_t last_valid_instruction = 0; for (std::size_t step_index = 0; step_index < steps.size(); ++step_index) { auto &step = steps[step_index]; const auto instruction = step.maneuver.instruction; if (instruction.type == TurnType::Suppressed) { BOOST_ASSERT(steps[last_valid_instruction].mode == step.mode); // count intersections. We cannot use exit, since intersections can follow directly // after a roundabout steps[last_valid_instruction].ElongateBy(step); steps[step_index].Invalidate(); } else if (!isSilent(instruction)) { // End of road is a turn that helps to identify the location of a turn. If the turn does // not pass by any oter intersections, the end-of-road characteristic does not improve // the instructions. // Here we reduce the verbosity of our output by reducing end-of-road emissions in cases // where no intersections have been passed in between. // Since the instruction is located at the beginning of a step, we need to check the // previous instruction. 
if (instruction.type == TurnType::EndOfRoad) { BOOST_ASSERT(step_index > 0); const auto &previous_step = steps[last_valid_instruction]; if (previous_step.intersections.size() < MIN_END_OF_ROAD_INTERSECTIONS) step.maneuver.instruction.type = TurnType::Turn; } // Remember the last non silent instruction last_valid_instruction = step_index; } } return removeNoTurnInstructions(std::move(steps)); } } // namespace guidance } // namespace engine } // namespace osrm
1
22,238
That means we still create `UseLane` instructions but treat them like `Suppressed`? Can we maybe change this in the pre-processing so that we just emit `Suppressed` instructions in these cases?
Project-OSRM-osrm-backend
cpp
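A minimal sketch of the change the reviewer suggests, using illustrative stand-in types rather than OSRM's real `RouteStep`/`TurnInstruction` structures (the real code would key on `step.maneuver.instruction.type`): if pre-processing demoted `UseLane` turns that carry no usable lane data to `Suppressed`, the post-processing above could drop them through its existing silent-turn path instead of special-casing `UseLane`.

```cpp
#include <vector>

// Stand-ins for the real guidance types -- assumed shapes, not OSRM's API.
enum class TurnType { Suppressed, UseLane };

struct Step
{
    TurnType type;
    bool has_lane_data;
};

// Hypothetical pre-processing pass: a UseLane turn without lane information
// adds no guidance value, so demote it to a silent Suppressed instruction
// before the step post-processing ever sees it.
void suppressEmptyUseLanes(std::vector<Step> &steps)
{
    for (auto &step : steps)
    {
        if (step.type == TurnType::UseLane && !step.has_lane_data)
            step.type = TurnType::Suppressed;
    }
}
```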
@@ -21,14 +21,13 @@ import ( "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" - "github.com/iotexproject/iotex-proto/golang/iotextypes" - "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/db/trie" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/state" + "github.com/iotexproject/iotex-proto/golang/iotextypes" ) type (
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package evm import ( "bytes" "context" "encoding/hex" "fmt" "math/big" "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" "go.uber.org/zap" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/db/trie" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/state" ) type ( // deleteAccount records the account/contract to be deleted deleteAccount map[hash.Hash160]struct{} // contractMap records the contracts being changed contractMap map[hash.Hash160]Contract // preimageMap records the preimage of hash reported by VM preimageMap map[common.Hash]SerializableBytes // GetBlockHash gets block hash by height GetBlockHash func(uint64) (hash.Hash256, error) // DepositGas deposits gas DepositGas func(context.Context, protocol.StateManager, *big.Int) (*action.TransactionLog, error) // StateDBAdapter represents the state db adapter for evm to access iotx blockchain StateDBAdapter struct { sm protocol.StateManager logs []*action.Log transactionLogs []*action.TransactionLog err error blockHeight uint64 executionHash hash.Hash256 refund uint64 cachedContract contractMap contractSnapshot map[int]contractMap // snapshots of contracts suicided deleteAccount // account/contract calling Suicide suicideSnapshot map[int]deleteAccount // snapshots of suicide accounts preimages preimageMap preimageSnapshot map[int]preimageMap notFixTopicCopyBug bool asyncContractTrie bool } ) // StateDBOption set StateDBAdapter construction param type StateDBOption func(*StateDBAdapter) error // NewStateDBAdapter creates a new state db with iotex blockchain func NewStateDBAdapter( sm protocol.StateManager, blockHeight uint64, notFixTopicCopyBug bool, asyncContractTrie bool, executionHash hash.Hash256, opts ...StateDBOption, ) *StateDBAdapter { s := &StateDBAdapter{ sm: sm, logs: []*action.Log{}, err: nil, blockHeight: blockHeight, executionHash: executionHash, cachedContract: make(contractMap), contractSnapshot: make(map[int]contractMap), suicided: make(deleteAccount), suicideSnapshot: make(map[int]deleteAccount), preimages: make(preimageMap), preimageSnapshot: make(map[int]preimageMap), notFixTopicCopyBug: notFixTopicCopyBug, asyncContractTrie: asyncContractTrie, } for _, opt := range opts { if err := opt(s); err != nil { log.L().Panic("failed to execute stateDB creation option") } } return s } func (stateDB *StateDBAdapter) logError(err error) { if stateDB.err == nil { stateDB.err = err } } // Error returns the first stored error during evm contract execution func (stateDB *StateDBAdapter) Error() error { return stateDB.err } // CreateAccount creates an account in iotx blockchain func (stateDB *StateDBAdapter) CreateAccount(evmAddr common.Address) { addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil 
{ log.L().Error("Failed to convert evm address.", zap.Error(err)) return } _, err = accountutil.LoadOrCreateAccount(stateDB.sm, addr.String()) if err != nil { log.L().Error("Failed to create account.", zap.Error(err)) stateDB.logError(err) return } log.L().Debug("Called CreateAccount.", log.Hex("addrHash", evmAddr[:])) } // SubBalance subtracts balance from account func (stateDB *StateDBAdapter) SubBalance(evmAddr common.Address, amount *big.Int) { if amount.Cmp(big.NewInt(int64(0))) == 0 { return } // stateDB.GetBalance(evmAddr) log.L().Debug(fmt.Sprintf("SubBalance %v from %s", amount, evmAddr.Hex())) addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return } state, err := stateDB.AccountState(addr.String()) if err != nil { log.L().Error("Failed to sub balance.", zap.Error(err)) stateDB.logError(err) return } if err := state.SubBalance(amount); err != nil { log.L().Error("Failed to sub balance.", zap.Error(err)) stateDB.logError(err) return } if err := accountutil.StoreAccount(stateDB.sm, addr, state); err != nil { log.L().Error("Failed to update pending account changes to trie.", zap.Error(err)) stateDB.logError(err) } } // AddBalance adds balance to account func (stateDB *StateDBAdapter) AddBalance(evmAddr common.Address, amount *big.Int) { if amount.Cmp(big.NewInt(int64(0))) == 0 { return } // stateDB.GetBalance(evmAddr) log.L().Debug(fmt.Sprintf("AddBalance %v to %s", amount, evmAddr.Hex())) addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return } var state *state.Account addrHash := hash.BytesToHash160(evmAddr[:]) if contract, ok := stateDB.cachedContract[addrHash]; ok { state = contract.SelfState() } else { state, err = accountutil.LoadOrCreateAccount(stateDB.sm, addr.String()) if err != nil { log.L().Error("Failed to add balance.", log.Hex("addrHash", evmAddr[:])) stateDB.logError(err) return } } if err := state.AddBalance(amount); err != nil { log.L().Error("Failed to add balance.", zap.Error(err)) stateDB.logError(err) return } if err := accountutil.StoreAccount(stateDB.sm, addr, state); err != nil { log.L().Error("Failed to update pending account changes to trie.", zap.Error(err)) stateDB.logError(err) } } // GetBalance gets the balance of account func (stateDB *StateDBAdapter) GetBalance(evmAddr common.Address) *big.Int { addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return big.NewInt(0) } state, err := stateDB.AccountState(addr.String()) if err != nil { log.L().Error("Failed to get balance.", zap.Error(err)) return big.NewInt(0) } log.L().Debug(fmt.Sprintf("Balance of %s is %v", evmAddr.Hex(), state.Balance)) return state.Balance } // GetNonce gets the nonce of account func (stateDB *StateDBAdapter) GetNonce(evmAddr common.Address) uint64 { addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return 0 } state, err := stateDB.AccountState(addr.String()) if err != nil { log.L().Error("Failed to get nonce.", zap.Error(err)) // stateDB.logError(err) return 0 } log.L().Debug("Called GetNonce.", zap.String("address", addr.String()), zap.Uint64("nonce", state.Nonce)) return state.Nonce } // SetNonce sets the nonce of account func (stateDB *StateDBAdapter) SetNonce(evmAddr common.Address, nonce uint64) { addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { 
log.L().Error("Failed to convert evm address.", zap.Error(err)) return } s, err := stateDB.AccountState(addr.String()) if err != nil { log.L().Error("Failed to set nonce.", zap.Error(err)) // stateDB.logError(err) return } log.L().Debug("Called SetNonce.", zap.String("address", addr.String()), zap.Uint64("nonce", nonce)) s.Nonce = nonce if err := accountutil.StoreAccount(stateDB.sm, addr, s); err != nil { log.L().Error("Failed to set nonce.", zap.Error(err)) stateDB.logError(err) } } // SubRefund subtracts refund func (stateDB *StateDBAdapter) SubRefund(gas uint64) { log.L().Debug("Called SubRefund.", zap.Uint64("gas", gas)) // stateDB.journal.append(refundChange{prev: self.refund}) if gas > stateDB.refund { panic("Refund counter not enough!") } stateDB.refund -= gas } // AddRefund adds refund func (stateDB *StateDBAdapter) AddRefund(gas uint64) { log.L().Debug("Called AddRefund.", zap.Uint64("gas", gas)) // stateDB.journal.append(refundChange{prev: self.refund}) stateDB.refund += gas } // GetRefund gets refund func (stateDB *StateDBAdapter) GetRefund() uint64 { log.L().Debug("Called GetRefund.") return stateDB.refund } // Suicide kills the contract func (stateDB *StateDBAdapter) Suicide(evmAddr common.Address) bool { addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return false } if !stateDB.Exist(evmAddr) { log.L().Debug("Account does not exist.", zap.String("address", addr.String())) return false } s, err := stateDB.AccountState(addr.String()) if err != nil { log.L().Debug("Failed to get account.", zap.String("address", addr.String())) return false } // clears the account balance s.Balance = nil s.Balance = big.NewInt(0) addrHash := hash.BytesToHash160(evmAddr.Bytes()) if _, err := stateDB.sm.PutState(s, protocol.LegacyKeyOption(addrHash)); err != nil { log.L().Error("Failed to kill contract.", zap.Error(err)) stateDB.logError(err) return false } // mark it as deleted stateDB.suicided[addrHash] = struct{}{} return true } // HasSuicided returns whether the contract has been killed func (stateDB *StateDBAdapter) HasSuicided(evmAddr common.Address) bool { addrHash := hash.BytesToHash160(evmAddr.Bytes()) _, ok := stateDB.suicided[addrHash] return ok } // Exist checks the existence of an address func (stateDB *StateDBAdapter) Exist(evmAddr common.Address) bool { addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return false } log.L().Debug("Check existence.", zap.String("address", addr.String()), log.Hex("addrHash", evmAddr[:])) addrHash := hash.BytesToHash160(addr.Bytes()) if _, ok := stateDB.cachedContract[addrHash]; ok { return true } recorded, err := accountutil.Recorded(stateDB.sm, addr) if !recorded || err != nil { log.L().Debug("Account does not exist.", zap.String("address", addr.String())) return false } return true } // Empty returns true if the the contract is empty func (stateDB *StateDBAdapter) Empty(evmAddr common.Address) bool { addr, err := address.FromBytes(evmAddr.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return true } log.L().Debug("Check whether the contract is empty.") s, err := stateDB.AccountState(addr.String()) if err != nil { return true } // TODO: delete hash.ZeroHash256 return s.Nonce == 0 && s.Balance.Sign() == 0 && (len(s.CodeHash) == 0 || bytes.Equal(s.CodeHash, hash.ZeroHash256[:])) } // RevertToSnapshot reverts the state factory to the state at a given 
snapshot func (stateDB *StateDBAdapter) RevertToSnapshot(snapshot int) { if err := stateDB.sm.Revert(snapshot); err != nil { err := errors.New("unexpected error: state manager's Revert() failed") log.L().Error("Failed to revert to snapshot.", zap.Error(err)) stateDB.logError(err) return } ds, ok := stateDB.suicideSnapshot[snapshot] if !ok { // this should not happen, b/c we save the suicide accounts on a successful return of Snapshot(), but check anyway log.L().Error("Failed to get snapshot.", zap.Int("snapshot", snapshot)) return } // restore the suicide accounts stateDB.suicided = nil stateDB.suicided = ds // restore modified contracts stateDB.cachedContract = nil stateDB.cachedContract = stateDB.contractSnapshot[snapshot] for addr, c := range stateDB.cachedContract { if err := c.LoadRoot(); err != nil { log.L().Error("Failed to load root for contract.", zap.Error(err), log.Hex("addrHash", addr[:])) return } } // restore preimages stateDB.preimages = nil stateDB.preimages = stateDB.preimageSnapshot[snapshot] } // Snapshot returns the snapshot id func (stateDB *StateDBAdapter) Snapshot() int { sn := stateDB.sm.Snapshot() if _, ok := stateDB.suicideSnapshot[sn]; ok { err := errors.New("unexpected error: duplicate snapshot version") log.L().Error("Failed to snapshot.", zap.Error(err)) // stateDB.err = err return sn } // save a copy of current suicide accounts sa := make(deleteAccount) for k, v := range stateDB.suicided { sa[k] = v } stateDB.suicideSnapshot[sn] = sa // save a copy of modified contracts c := make(contractMap) for k, v := range stateDB.cachedContract { c[k] = v.Snapshot() } stateDB.contractSnapshot[sn] = c // save a copy of preimages p := make(preimageMap) for k, v := range stateDB.preimages { p[k] = v } stateDB.preimageSnapshot[sn] = p return sn } // AddLog adds log func (stateDB *StateDBAdapter) AddLog(evmLog *types.Log) { log.L().Debug("Called AddLog.", zap.Any("log", evmLog)) addr, err := address.FromBytes(evmLog.Address.Bytes()) if err != nil { log.L().Error("Failed to convert evm address.", zap.Error(err)) return } var topics []hash.Hash256 for _, evmTopic := range evmLog.Topics { var topic hash.Hash256 copy(topic[:], evmTopic.Bytes()) topics = append(topics, topic) } if topics[0] == inContractTransfer { if len(topics) != 3 { panic("Invalid in contract transfer topics") } from, _ := address.FromBytes(topics[1][12:]) to, _ := address.FromBytes(topics[2][12:]) stateDB.transactionLogs = append(stateDB.transactionLogs, &action.TransactionLog{ Type: iotextypes.TransactionLogType_IN_CONTRACT_TRANSFER, Sender: from.String(), Recipient: to.String(), Amount: new(big.Int).SetBytes(evmLog.Data), }) return } stateDB.logs = append(stateDB.logs, &action.Log{ Address: addr.String(), Topics: topics, Data: evmLog.Data, BlockHeight: stateDB.blockHeight, ActionHash: stateDB.executionHash, NotFixTopicCopyBug: stateDB.notFixTopicCopyBug, }) } // Logs returns the logs func (stateDB *StateDBAdapter) Logs() []*action.Log { return stateDB.logs } // TransactionLogs returns the transaction logs func (stateDB *StateDBAdapter) TransactionLogs() []*action.TransactionLog { return stateDB.transactionLogs } // AddPreimage adds the preimage of a hash func (stateDB *StateDBAdapter) AddPreimage(hash common.Hash, preimage []byte) { if _, ok := stateDB.preimages[hash]; !ok { b := make([]byte, len(preimage)) copy(b, preimage) stateDB.preimages[hash] = b } } // ForEachStorage loops each storage func (stateDB *StateDBAdapter) ForEachStorage(addr common.Address, cb func(common.Hash, common.Hash) bool) error { 
ctt, err := stateDB.getContract(hash.BytesToHash160(addr[:])) if err != nil { // stateDB.err = err return err } iter, err := ctt.Iterator() if err != nil { // stateDB.err = err return err } for { key, value, err := iter.Next() if err == trie.ErrEndOfIterator { // hit the end of the iterator, exit now return nil } if err != nil { return err } ckey := common.Hash{} copy(ckey[:], key[:]) cvalue := common.Hash{} copy(cvalue[:], value[:]) if !cb(ckey, cvalue) { return nil } } return nil } // AccountState returns an account state func (stateDB *StateDBAdapter) AccountState(encodedAddr string) (*state.Account, error) { addr, err := address.FromString(encodedAddr) if err != nil { return nil, errors.Wrap(err, "failed to get public key hash from encoded address") } addrHash := hash.BytesToHash160(addr.Bytes()) if contract, ok := stateDB.cachedContract[addrHash]; ok { return contract.SelfState(), nil } return accountutil.LoadAccount(stateDB.sm, addrHash) } //====================================== // Contract functions //====================================== // GetCodeHash returns contract's code hash func (stateDB *StateDBAdapter) GetCodeHash(evmAddr common.Address) common.Hash { addr := hash.BytesToHash160(evmAddr[:]) codeHash := common.Hash{} if contract, ok := stateDB.cachedContract[addr]; ok { copy(codeHash[:], contract.SelfState().CodeHash) return codeHash } account, err := accountutil.LoadAccount(stateDB.sm, addr) if err != nil { log.L().Error("Failed to get code hash.", zap.Error(err)) // TODO (zhi) not all err should be logged // stateDB.logError(err) return codeHash } copy(codeHash[:], account.CodeHash) return codeHash } // GetCode returns contract's code func (stateDB *StateDBAdapter) GetCode(evmAddr common.Address) []byte { addr := hash.BytesToHash160(evmAddr[:]) if contract, ok := stateDB.cachedContract[addr]; ok { code, err := contract.GetCode() if err != nil { log.L().Error("Failed to get code hash.", zap.Error(err)) return nil } return code } account, err := accountutil.LoadAccount(stateDB.sm, addr) if err != nil { log.L().Error("Failed to load account state for address.", log.Hex("addrHash", addr[:])) return nil } var code SerializableBytes if _, err = stateDB.sm.State(&code, protocol.NamespaceOption(CodeKVNameSpace), protocol.KeyOption(account.CodeHash[:])); err != nil { // TODO: Suppress the as it's too much now //log.L().Error("Failed to get code from trie.", zap.Error(err)) return nil } return code[:] } // GetCodeSize gets the code size saved in hash func (stateDB *StateDBAdapter) GetCodeSize(evmAddr common.Address) int { code := stateDB.GetCode(evmAddr) if code == nil { return 0 } log.L().Debug("Called GetCodeSize.", log.Hex("addrHash", evmAddr[:])) return len(code) } // SetCode sets contract's code func (stateDB *StateDBAdapter) SetCode(evmAddr common.Address, code []byte) { addr := hash.BytesToHash160(evmAddr[:]) contract, err := stateDB.getContract(addr) if err != nil { log.L().Error("Failed to get contract.", zap.Error(err), log.Hex("addrHash", addr[:])) stateDB.logError(err) return } contract.SetCode(hash.Hash256b(code), code) } // GetCommittedState gets committed state func (stateDB *StateDBAdapter) GetCommittedState(evmAddr common.Address, k common.Hash) common.Hash { addr := hash.BytesToHash160(evmAddr[:]) contract, err := stateDB.getContract(addr) if err != nil { log.L().Error("Failed to get contract.", zap.Error(err), log.Hex("addrHash", addr[:])) stateDB.logError(err) return common.Hash{} } v, err := contract.GetCommittedState(hash.BytesToHash256(k[:])) if err != nil 
{ log.L().Error("Failed to get committed state.", zap.Error(err)) stateDB.logError(err) return common.Hash{} } return common.BytesToHash(v) } // GetState gets state func (stateDB *StateDBAdapter) GetState(evmAddr common.Address, k common.Hash) common.Hash { addr := hash.BytesToHash160(evmAddr[:]) contract, err := stateDB.getContract(addr) if err != nil { log.L().Error("Failed to get contract.", zap.Error(err), log.Hex("addrHash", addr[:])) stateDB.logError(err) return common.Hash{} } v, err := contract.GetState(hash.BytesToHash256(k[:])) if err != nil { log.L().Debug("Failed to get state.", zap.Error(err)) stateDB.logError(err) return common.Hash{} } return common.BytesToHash(v) } // SetState sets state func (stateDB *StateDBAdapter) SetState(evmAddr common.Address, k, v common.Hash) { addr := hash.BytesToHash160(evmAddr[:]) contract, err := stateDB.getContract(addr) if err != nil { log.L().Error("Failed to get contract.", zap.Error(err), log.Hex("addrHash", addr[:])) stateDB.logError(err) return } log.L().Debug("Called SetState", log.Hex("addrHash", evmAddr[:]), log.Hex("k", k[:])) if err := contract.SetState(hash.BytesToHash256(k[:]), v[:]); err != nil { log.L().Error("Failed to set state.", zap.Error(err), log.Hex("addrHash", addr[:])) stateDB.logError(err) return } } // CommitContracts commits contract code to db and update pending contract account changes to trie func (stateDB *StateDBAdapter) CommitContracts() error { addrStrs := make([]string, 0) for addr := range stateDB.cachedContract { addrStrs = append(addrStrs, hex.EncodeToString(addr[:])) } sort.Strings(addrStrs) for _, addrStr := range addrStrs { var addr hash.Hash160 addrBytes, err := hex.DecodeString(addrStr) if err != nil { return errors.Wrap(err, "failed to decode address hash") } copy(addr[:], addrBytes) if _, ok := stateDB.suicided[addr]; ok { // no need to update a suicide account/contract continue } contract := stateDB.cachedContract[addr] if err := contract.Commit(); err != nil { stateDB.logError(err) return errors.Wrap(err, "failed to commit contract") } state := contract.SelfState() // store the account (with new storage trie root) into account trie if _, err := stateDB.sm.PutState(state, protocol.LegacyKeyOption(addr)); err != nil { stateDB.logError(err) return errors.Wrap(err, "failed to update pending account changes to trie") } } // delete suicided accounts/contract addrStrs = make([]string, 0) for addr := range stateDB.suicided { addrStrs = append(addrStrs, hex.EncodeToString(addr[:])) } sort.Strings(addrStrs) for _, addrStr := range addrStrs { var addr hash.Hash160 addrBytes, err := hex.DecodeString(addrStr) if err != nil { return errors.Wrap(err, "failed to decode address hash") } copy(addr[:], addrBytes) if _, err := stateDB.sm.DelState(protocol.LegacyKeyOption(addr)); err != nil { stateDB.logError(err) return errors.Wrapf(err, "failed to delete suicide account/contract %x", addr[:]) } } // write preimages to DB addrStrs = make([]string, 0) for addr := range stateDB.preimages { addrStrs = append(addrStrs, hex.EncodeToString(addr[:])) } sort.Strings(addrStrs) for _, addrStr := range addrStrs { var k common.Hash addrBytes, err := hex.DecodeString(addrStr) if err != nil { return errors.Wrap(err, "failed to decode address hash") } copy(k[:], addrBytes) v := stateDB.preimages[k] h := make([]byte, len(k)) copy(h, k[:]) stateDB.sm.PutState(v, protocol.NamespaceOption(PreimageKVNameSpace), protocol.KeyOption(h)) } return nil } // getContract returns the contract of addr func (stateDB *StateDBAdapter) 
getContract(addr hash.Hash160) (Contract, error) { if contract, ok := stateDB.cachedContract[addr]; ok { return contract, nil } return stateDB.getNewContract(addr) } func (stateDB *StateDBAdapter) getNewContract(addr hash.Hash160) (Contract, error) { account, err := accountutil.LoadAccount(stateDB.sm, addr) if err != nil { return nil, errors.Wrapf(err, "failed to load account state for address %x", addr) } contract, err := newContract(addr, account, stateDB.sm, stateDB.asyncContractTrie) if err != nil { return nil, errors.Wrapf(err, "failed to create storage trie for new contract %x", addr) } // add to contract cache stateDB.cachedContract[addr] = contract return contract, nil } // clear clears local changes func (stateDB *StateDBAdapter) clear() { stateDB.cachedContract = nil stateDB.contractSnapshot = nil stateDB.suicided = nil stateDB.suicideSnapshot = nil stateDB.preimages = nil stateDB.preimageSnapshot = nil stateDB.cachedContract = make(contractMap) stateDB.contractSnapshot = make(map[int]contractMap) stateDB.suicided = make(deleteAccount) stateDB.suicideSnapshot = make(map[int]deleteAccount) stateDB.preimages = make(preimageMap) stateDB.preimageSnapshot = make(map[int]preimageMap) }
1
22,843
No need to move this here: iotex-proto is outside of iotex-core and is treated the same as iotex-address and go-pkgs.
iotexproject-iotex-core
go
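For illustration, the import grouping the review defends, shown as a standalone sketch (blank imports keep the snippet compilable on its own; the real file uses these packages directly): external iotexproject modules stay in one block, iotex-core internal packages in another.

```go
package evm

import (
	// External dependencies: iotex-proto is developed outside iotex-core,
	// so it is grouped with iotex-address and go-pkgs rather than with the
	// internal packages below.
	_ "github.com/iotexproject/go-pkgs/hash"
	_ "github.com/iotexproject/iotex-address/address"
	_ "github.com/iotexproject/iotex-proto/golang/iotextypes"

	// Internal iotex-core packages.
	_ "github.com/iotexproject/iotex-core/action"
	_ "github.com/iotexproject/iotex-core/action/protocol"
)
```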
@@ -600,6 +600,14 @@ static void app_exit_event(void) { check_stack_alignment(); + +#if defined(LINUX) && defined(X86_64) + /* i#4335: Test allocation of more than 2.8GB in unreachable heap */ + for (int i = 0; i != 50; ++i) + { + malloc(100000000); + } +#endif } DR_EXPORT void
1
/* ********************************************************** * Copyright (c) 2012-2020 Google, Inc. All rights reserved. * Copyright (c) 2008 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include "dr_api.h" #include "client_tools.h" #include "string.h" #define MINSERT instrlist_meta_preinsert #ifdef X64 /* we use this for clean call base-disp refs */ static reg_t buf[] = { 0xcafebabe, 0xfeedadad, 0xeeeeeeee, 0xbadcabee }; #endif #ifdef X86 /* buffers for testing reg_set_value_ex */ byte orig_reg_val_buf[64]; byte new_reg_val_buf[64]; static void print_error_on_fail(bool check) { if (!check) dr_fprintf(STDERR, "error\n"); } static void set_gpr() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_XAX, &mcontext, orig_reg_val_buf); reg_get_value_ex(DR_REG_XAX, &mcontext, new_reg_val_buf); new_reg_val_buf[0] = 0x75; new_reg_val_buf[2] = 0x83; new_reg_val_buf[3] = 0x23; bool succ = reg_set_value_ex(DR_REG_XAX, &mcontext, new_reg_val_buf); print_error_on_fail(succ); memset(new_reg_val_buf, 0, 64); dr_set_mcontext(drcontext, &mcontext); } static void check_gpr() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_XAX, &mcontext, new_reg_val_buf); print_error_on_fail(new_reg_val_buf[0] == 0x75); print_error_on_fail(new_reg_val_buf[2] == 0x83); print_error_on_fail(new_reg_val_buf[3] == 0x23); bool succ = reg_set_value_ex(DR_REG_XAX, &mcontext, orig_reg_val_buf); print_error_on_fail(succ); dr_set_mcontext(drcontext, &mcontext); } static void set_xmm() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_XMM0, &mcontext, orig_reg_val_buf); reg_get_value_ex(DR_REG_XMM0, &mcontext, 
new_reg_val_buf); new_reg_val_buf[0] = 0x77; new_reg_val_buf[2] = 0x89; new_reg_val_buf[14] = 0x21; bool succ = reg_set_value_ex(DR_REG_XMM0, &mcontext, new_reg_val_buf); print_error_on_fail(succ); memset(new_reg_val_buf, 0, 64); dr_set_mcontext(drcontext, &mcontext); } static void check_xmm() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_XMM0, &mcontext, new_reg_val_buf); print_error_on_fail(new_reg_val_buf[0] == 0x77); print_error_on_fail(new_reg_val_buf[2] == 0x89); print_error_on_fail(new_reg_val_buf[14] == 0x21); bool succ = reg_set_value_ex(DR_REG_XMM0, &mcontext, orig_reg_val_buf); print_error_on_fail(succ); dr_set_mcontext(drcontext, &mcontext); } static void set_ymm() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_YMM0, &mcontext, orig_reg_val_buf); reg_get_value_ex(DR_REG_YMM0, &mcontext, new_reg_val_buf); new_reg_val_buf[0] = 0x77; new_reg_val_buf[2] = 0x80; new_reg_val_buf[14] = 0x25; new_reg_val_buf[20] = 0x09; new_reg_val_buf[25] = 0x06; bool succ = reg_set_value_ex(DR_REG_YMM0, &mcontext, new_reg_val_buf); print_error_on_fail(succ); memset(new_reg_val_buf, 0, 64); dr_set_mcontext(drcontext, &mcontext); } static void check_ymm() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_YMM0, &mcontext, new_reg_val_buf); print_error_on_fail(new_reg_val_buf[0] == 0x77); print_error_on_fail(new_reg_val_buf[2] == 0x80); print_error_on_fail(new_reg_val_buf[14] == 0x25); print_error_on_fail(new_reg_val_buf[20] == 0x09); print_error_on_fail(new_reg_val_buf[25] == 0x06); bool succ = reg_set_value_ex(DR_REG_YMM0, &mcontext, orig_reg_val_buf); print_error_on_fail(succ); dr_set_mcontext(drcontext, &mcontext); } # ifdef __AVX512F__ static void set_zmm() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_ZMM0, &mcontext, orig_reg_val_buf); reg_get_value_ex(DR_REG_ZMM0, &mcontext, new_reg_val_buf); new_reg_val_buf[0] = 0x77; new_reg_val_buf[2] = 0x80; new_reg_val_buf[14] = 0x25; new_reg_val_buf[20] = 0x09; new_reg_val_buf[25] = 0x02; new_reg_val_buf[32] = 0x16; new_reg_val_buf[55] = 0x18; new_reg_val_buf[60] = 0x22; bool succ = reg_set_value_ex(DR_REG_ZMM0, &mcontext, new_reg_val_buf); print_error_on_fail(succ); memset(new_reg_val_buf, 0, 64); dr_set_mcontext(drcontext, &mcontext); } static void check_zmm() { check_stack_alignment(); void *drcontext = dr_get_current_drcontext(); dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; dr_get_mcontext(drcontext, &mcontext); reg_get_value_ex(DR_REG_ZMM0, &mcontext, new_reg_val_buf); print_error_on_fail(new_reg_val_buf[0] == 0x77); print_error_on_fail(new_reg_val_buf[2] == 0x80); print_error_on_fail(new_reg_val_buf[14] == 0x25); print_error_on_fail(new_reg_val_buf[20] == 0x09); print_error_on_fail(new_reg_val_buf[25] == 0x02); print_error_on_fail(new_reg_val_buf[32] == 0x16); print_error_on_fail(new_reg_val_buf[55] == 0x18); print_error_on_fail(new_reg_val_buf[60] == 0x22); bool succ = reg_set_value_ex(DR_REG_ZMM0, &mcontext, orig_reg_val_buf); print_error_on_fail(succ); 
dr_set_mcontext(drcontext, &mcontext); } # endif #endif static void ind_call(reg_t a1, reg_t a2) { dr_fprintf(STDERR, "bar " PFX " " PFX "\n", a1, a2); } static void (*ind_call_ptr)(reg_t a1, reg_t a2) = ind_call; static void foo(reg_t a1, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6, reg_t a7, reg_t a8) { check_stack_alignment(); dr_fprintf( STDERR, "foo " PFX " " PFX " " PFX " " PFX "\n " PFX " " PFX " " PFX " " PFX "\n", a1, /* printing addr of buf would be non-deterministic */ IF_X64_ELSE((a2 == (reg_t)buf) ? 1 : -1, a2), a3, a4, a5, a6, a7, a8); } static void bar(reg_t a1, reg_t a2) { check_stack_alignment(); /* test indirect call handling in clean call analysis */ ind_call_ptr(a1, a2); } static void save_test() { check_stack_alignment(); int i; void *drcontext = dr_get_current_drcontext(); dr_fprintf(STDERR, "verifying values\n"); if (*(((reg_t *)dr_get_tls_field(drcontext)) + 2) != 1) { dr_fprintf(STDERR, "Write to client tls from cache failed, got %d, expected %d\n", *(((reg_t *)dr_get_tls_field(drcontext)) + 2), 1); } for (i = SPILL_SLOT_1; i <= SPILL_SLOT_MAX; i++) { reg_t value = dr_read_saved_reg(drcontext, i); if (value != i + 1 - SPILL_SLOT_1) { dr_fprintf(STDERR, "slot %d value %d doesn't match expected value %d\n", i, value, i + 1 - SPILL_SLOT_1); } if (i % 2 == 0) { /* set every other reg */ value = 100 - i; dr_write_saved_reg(drcontext, i, value); } } } static int post_crash = 0; static void *tag_of_interest; static void restore_state_event(void *drcontext, void *tag, dr_mcontext_t *mcontext, bool restore_memory, bool app_code_consistent) { if (tag == tag_of_interest) { dr_fprintf(STDERR, "in restore_state for our clean call crash %d\n", post_crash); /* flush, so we can use different instrumentation next time */ dr_delay_flush_region(dr_fragment_app_pc(tag), 1, 0, NULL); } } static void cleancall_aflags_save(void) { dr_fprintf(STDERR, "cleancall_aflags_save\n"); } static void cleancall_no_aflags_save(void) { dr_fprintf(STDERR, "cleancall_no_aflags_save\n"); } static bool first_bb = true; static dr_emit_flags_t bb_event(void *drcontext, void *tag, instrlist_t *bb, bool for_trace, bool translating) { instr_t *instr, *next_instr, *next_next_instr; bool modified = false; #define PRE(bb, i) instrlist_preinsert(bb, instr, INSTR_XL8(i, dr_fragment_app_pc(tag))) #define PREM(bb, i) \ instrlist_meta_preinsert(bb, instr, INSTR_XL8(i, dr_fragment_app_pc(tag))) if (first_bb) { instr_t *add, *cmp; /* test cleancall with/without aflags save * cleancall_aflags_save * cmp # fake cmp app instr * cleancall_no_aflags_save * add # fake add app instr */ first_bb = false; instr = instrlist_first(bb); cmp = INSTR_CREATE_cmp(drcontext, opnd_create_reg(DR_REG_XAX), opnd_create_reg(DR_REG_XAX)); PRE(bb, cmp); add = INSTR_CREATE_add(drcontext, opnd_create_reg(DR_REG_XAX), OPND_CREATE_INT32(0)); PRE(bb, add); dr_insert_clean_call(drcontext, bb, add, (void *)cleancall_no_aflags_save, false, 0); dr_insert_clean_call(drcontext, bb, cmp, (void *)cleancall_aflags_save, false, 0); #ifdef X86 /* Other unrelated tests for setting register values. 
*/ dr_insert_clean_call(drcontext, bb, instr, set_gpr, false, 0); dr_insert_clean_call(drcontext, bb, instr, check_gpr, false, 0); dr_insert_clean_call(drcontext, bb, instr, set_xmm, false, 0); dr_insert_clean_call(drcontext, bb, instr, check_xmm, false, 0); dr_insert_clean_call(drcontext, bb, instr, set_ymm, false, 0); dr_insert_clean_call(drcontext, bb, instr, check_ymm, false, 0); # ifdef __AVX512F__ dr_insert_clean_call(drcontext, bb, instr, set_zmm, false, 0); dr_insert_clean_call(drcontext, bb, instr, check_zmm, false, 0); # endif #endif } /* Look for 3 nops to indicate handler is set up */ for (instr = instrlist_first(bb); instr != NULL; instr = next_instr) { next_instr = instr_get_next(instr); if (next_instr != NULL) next_next_instr = instr_get_next(next_instr); else next_next_instr = NULL; if (instr_is_nop(instr) && next_instr != NULL && instr_is_nop(next_instr) && next_next_instr != NULL && instr_is_call_direct(next_next_instr)) { ASSERT(tag_of_interest == NULL || tag_of_interest == tag); tag_of_interest = tag; modified = true; /* # of crashes is tied to # of setjmps in cleancall.c */ if (post_crash == 0) { /* Test crash in 1st clean call arg */ dr_fprintf(STDERR, "inserting clean call crash code 1\n"); dr_insert_clean_call(drcontext, bb, instrlist_first(bb), (void *)foo, false /* don't save fp state */, 1, OPND_CREATE_ABSMEM(NULL, OPSZ_4)); post_crash++; } else if (post_crash == 1) { /* Test crash in 2nd clean call arg */ dr_fprintf(STDERR, "inserting clean call crash code 2\n"); dr_insert_clean_call(drcontext, bb, instrlist_first(bb), (void *)foo, false /* don't save fp state */, 2, OPND_CREATE_INT32(0), OPND_CREATE_ABSMEM(NULL, OPSZ_4)); post_crash++; } else if (post_crash == 2) { /* PR 307242: test xsp args */ reg_id_t scratch; #ifdef X64 # ifdef WINDOWS scratch = REG_XCX; # else scratch = REG_XDI; # endif #else scratch = REG_XAX; #endif dr_fprintf(STDERR, "inserting xsp arg testing\n"); /* See notes below: we crash after, so can clobber regs */ PRE(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(scratch), OPND_CREATE_INT32(sizeof(reg_t)))); PRE(bb, INSTR_CREATE_push_imm(drcontext, OPND_CREATE_INT32((int)0xbcbcaba0))); PRE(bb, INSTR_CREATE_push_imm(drcontext, OPND_CREATE_INT32((int)0xbcbcaba1))); dr_insert_clean_call( drcontext, bb, instr, (void *)bar, false /* don't save fp state */, 2, OPND_CREATE_MEM32(REG_XSP, 0), /* test conflicting w/ scratch reg */ opnd_create_base_disp(REG_XSP, scratch, 1, 0, OPSZ_PTR)); /* Even though we'll be doing a longjmp, building w/ VS2010 results * in silent failure on handling the exception so we restore xsp. */ PRE(bb, INSTR_CREATE_lea( drcontext, opnd_create_reg(REG_XSP), OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, 2 * sizeof(reg_t)))); PRE(bb, INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(REG_XAX), OPND_CREATE_ABSMEM(NULL, OPSZ_PTR))); post_crash++; } else if (post_crash == 3) { #ifdef X86_64 /* For x64, test using calling convention regs as params. * We do different things depending on order, whether a * memory reference, etc. * To test our values, we clobber app registers. The app * has a setjmp set up, so we crash after for a deterministic * result. 
*/ dr_fprintf(STDERR, "inserting clean call arg testing\n"); /* We do not translate the regs back */ /* We arrange to have our base-disps all be small offsets off buf */ PRE(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(REG_RDX), OPND_CREATE_INT32(sizeof(reg_t)))); PRE(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(REG_RCX), OPND_CREATE_INTPTR(buf))); PRE(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(REG_R8), OPND_CREATE_INT32(-42))); PRE(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(REG_R9), OPND_CREATE_INT32((int)0xdeadbeef))); PRE(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(REG_RAX), OPND_CREATE_INT32(2 * sizeof(reg_t)))); PRE(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(REG_RBP), OPND_CREATE_INT32(3 * sizeof(reg_t)))); dr_insert_clean_call( drcontext, bb, instr, (void *)foo, false /* don't save fp state */, 8, /* Pick registers used by both Windows and Linux */ opnd_create_reg(REG_RDX), opnd_create_reg(REG_RCX), opnd_create_reg(REG_R9), opnd_create_reg(REG_R8), OPND_CREATE_MEM32(REG_RCX, 0), /* test having only index register conflict */ opnd_create_base_disp(REG_RBP, REG_RCX, 1, 0, OPSZ_PTR), /* test OPSZ_4, and using register modified * by clean call setup (rax) */ opnd_create_base_disp(REG_RAX, REG_RCX, 1, 0, OPSZ_4), /* test having both base and index conflict */ opnd_create_base_disp(REG_RDX, REG_RCX, 1, 0, OPSZ_PTR)); #endif PRE(bb, INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(REG_XAX), OPND_CREATE_ABSMEM(NULL, OPSZ_PTR))); post_crash++; } else { /* Test register saving and restoring and access to saved registers * from outside the cache. */ int i; instr_t *fault = INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(REG_XAX), OPND_CREATE_ABSMEM(NULL, OPSZ_PTR)); instr_t *post_fault = INSTR_CREATE_label(drcontext); dr_fprintf(STDERR, "inserting saved reg access testing\n"); /* we want to test all the slots so juggle around to save xax and flags * to client's tls. */ dr_save_reg(drcontext, bb, instr, REG_XBX, SPILL_SLOT_1); dr_insert_read_tls_field(drcontext, bb, instr, REG_XBX); PREM(bb, INSTR_CREATE_mov_st(drcontext, opnd_create_base_disp(REG_XBX, REG_NULL, 0, 0 * sizeof(reg_t), OPSZ_PTR), opnd_create_reg(REG_XAX))); dr_save_arith_flags(drcontext, bb, instr, SPILL_SLOT_2); PREM(bb, INSTR_CREATE_mov_st(drcontext, opnd_create_base_disp(REG_XBX, REG_NULL, 0, 1 * sizeof(reg_t), OPSZ_PTR), opnd_create_reg(REG_XAX))); PREM(bb, INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INT32(1))); /* test tls writing */ PREM(bb, INSTR_CREATE_mov_st(drcontext, opnd_create_base_disp(REG_XBX, REG_NULL, 0, 2 * sizeof(reg_t), OPSZ_PTR), opnd_create_reg(REG_XAX))); dr_restore_reg(drcontext, bb, instr, REG_XBX, SPILL_SLOT_1); /* now test the slots */ /* xax is our tls + 0, flags is our tls + sizeof(reg_t) */ for (i = SPILL_SLOT_1; i <= SPILL_SLOT_MAX; i++) { dr_save_reg(drcontext, bb, instr, REG_XAX, i); PREM(bb, INSTR_CREATE_inc(drcontext, opnd_create_reg(REG_XAX))); } dr_insert_clean_call(drcontext, bb, instr, (void *)save_test, true /* try saving the fp state */, 0); for (i = SPILL_SLOT_1; i <= SPILL_SLOT_MAX; i++) { /* test using opnd */ if (i < dr_max_opnd_accessible_spill_slot()) { PREM(bb, INSTR_CREATE_cmp( drcontext, dr_reg_spill_slot_opnd(drcontext, i), (i % 2 == 0) ? 
OPND_CREATE_INT8(100 - i) : OPND_CREATE_INT8(i + 1 - SPILL_SLOT_1))); PREM(bb, INSTR_CREATE_jcc(drcontext, OP_jne, opnd_create_instr(fault))); } /* test using restore routine */ dr_restore_reg(drcontext, bb, instr, REG_XAX, i); PREM(bb, INSTR_CREATE_cmp(drcontext, opnd_create_reg(REG_XAX), (i % 2 == 0) ? OPND_CREATE_INT8(100 - i) : OPND_CREATE_INT8(i + 1 - SPILL_SLOT_1))); PREM(bb, INSTR_CREATE_jcc(drcontext, OP_jne, opnd_create_instr(fault))); } PREM(bb, INSTR_CREATE_jmp_short(drcontext, opnd_create_instr(post_fault))); PRE(bb, fault); /* pre not prem since we want this to be an app fault */ PREM(bb, post_fault); /* Now juggle xax and flags back from client tls */ dr_save_reg(drcontext, bb, instr, REG_XBX, SPILL_SLOT_1); dr_insert_read_tls_field(drcontext, bb, instr, REG_XBX); PREM(bb, INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(REG_XAX), opnd_create_base_disp(REG_XBX, REG_NULL, 0, 1 * sizeof(reg_t), OPSZ_PTR))); dr_restore_arith_flags(drcontext, bb, instr, SPILL_SLOT_MAX); PREM(bb, INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(REG_XAX), opnd_create_base_disp(REG_XBX, REG_NULL, 0, 0 * sizeof(reg_t), OPSZ_PTR))); dr_restore_reg(drcontext, bb, instr, REG_XBX, SPILL_SLOT_1); #if VERBOSE /* for debugging */ instrlist_disassemble(drcontext, tag, bb, dr_get_stdout_file()); #endif post_crash++; /* note we don't actually crash so this must be the * last test */ } } } if (modified) /* store since not constant instrumentation */ return DR_EMIT_STORE_TRANSLATIONS; else return DR_EMIT_DEFAULT; } static void thread_exit(void *drcontext) { dr_thread_free(drcontext, dr_get_tls_field(drcontext), 3 * sizeof(reg_t)); } static void thread_start(void *drcontext) { dr_set_tls_field(drcontext, dr_thread_alloc(drcontext, 3 * sizeof(reg_t))); } static void app_exit_event(void) { check_stack_alignment(); } DR_EXPORT void dr_init(client_id_t id) { dr_register_bb_event(bb_event); dr_register_thread_init_event(thread_start); dr_register_thread_exit_event(thread_exit); dr_register_restore_state_event(restore_state_event); dr_register_exit_event(app_exit_event); }
1
21,142
Looks like a clang-format style violation; it should turn Travis red.
DynamoRIO-dynamorio
c
@@ -127,6 +127,11 @@ MESSAGE # Run all examples if none match the configured filters (default: `false`). add_setting :run_all_when_everything_filtered + + # Allow user to configure their own success/pending/failure colors + add_setting :success_color + add_setting :pending_color + add_setting :failure_color # Seed for random ordering (default: generated randomly each run). #
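A hedged sketch of how the settings added in this hunk might be used; the chosen symbols are illustrative only, since the diff itself does not define which color names are valid and `add_setting` (per the Configuration docs below) only generates the setter, getter, and predicate.

# Hypothetical usage of the settings added above. The color symbols
# shown are assumptions, not something this patch establishes.
RSpec.configure do |config|
  config.success_color = :cyan
  config.pending_color = :blue
  config.failure_color = :magenta
end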
1
require 'fileutils' module RSpec module Core # Stores runtime configuration information. # # Configuration options are loaded from `~/.rspec`, `.rspec`, # `.rspec-local`, command line switches, and the `SPEC_OPTS` environment # variable (listed in lowest to highest precedence; for example, an option # in `~/.rspec` can be overridden by an option in `.rspec-local`). # # @example Standard settings # RSpec.configure do |c| # c.drb = true # c.drb_port = 1234 # c.default_path = 'behavior' # end # # @example Hooks # RSpec.configure do |c| # c.before(:suite) { establish_connection } # c.before(:each) { log_in_as :authorized } # c.around(:each) { |ex| Database.transaction(&ex) } # end # # @see RSpec.configure # @see Hooks class Configuration include RSpec::Core::Hooks class MustBeConfiguredBeforeExampleGroupsError < StandardError; end # @private def self.define_reader(name) eval <<-CODE def #{name} value_for(#{name.inspect}, defined?(@#{name}) ? @#{name} : nil) end CODE end # @private def self.deprecate_alias_key RSpec.warn_deprecation <<-MESSAGE The :alias option to add_setting is deprecated. Use :alias_with on the original setting instead. Called from #{caller(0)[5]} MESSAGE end # @private def self.define_aliases(name, alias_name) alias_method alias_name, name alias_method "#{alias_name}=", "#{name}=" define_predicate_for alias_name end # @private def self.define_predicate_for(*names) names.each {|name| alias_method "#{name}?", name} end # @private # # Invoked by the `add_setting` instance method. Use that method on a # `Configuration` instance rather than this class method. def self.add_setting(name, opts={}) raise "Use the instance add_setting method if you want to set a default" if opts.has_key?(:default) if opts[:alias] deprecate_alias_key define_aliases(opts[:alias], name) else attr_writer name define_reader name define_predicate_for name end [opts[:alias_with]].flatten.compact.each do |alias_name| define_aliases(name, alias_name) end end # @macro [attach] add_setting # @attribute $1 # Patterns to match against lines in backtraces presented in failure # messages in order to filter them out (default: # DEFAULT_BACKTRACE_PATTERNS). You can either replace this list using # the setter or modify it using the getter. # # To override this behavior and display a full backtrace, use # `--backtrace` on the command line, in a `.rspec` file, or in the # `rspec_options` attribute of RSpec's rake task. add_setting :backtrace_clean_patterns # Path to use if no path is provided to the `rspec` command (default: # `"spec"`). Allows you to just type `rspec` instead of `rspec spec` to # run all the examples in the `spec` directory. add_setting :default_path # Run examples over DRb (default: `false`). RSpec doesn't supply the DRb # server, but you can use tools like spork. add_setting :drb # The drb_port (default: `8989`). add_setting :drb_port # Default: `$stderr`. add_setting :error_stream # Clean up and exit after the first failure (default: `false`). add_setting :fail_fast # The exit code to return if there are any failures (default: 1). add_setting :failure_exit_code # Determines the order in which examples are run (default: OS standard # load order for files, declaration order for groups and examples). define_reader :order # Default: `$stdout`. 
# Also known as `output` and `out` add_setting :output_stream, :alias_with => [:output, :out] # Load files matching this pattern (default: `'**/*_spec.rb'`) add_setting :pattern, :alias_with => :filename_pattern # Report the times for the 10 slowest examples (default: `false`). add_setting :profile_examples # Run all examples if none match the configured filters (default: `false`). add_setting :run_all_when_everything_filtered # Seed for random ordering (default: generated randomly each run). # # When you run specs with `--order random`, RSpec generates a random seed # for the randomization and prints it to the `output_stream` (assuming # you're using RSpec's built-in formatters). If you discover an ordering # dependency (i.e. examples fail intermittently depending on order), set # this (on Configuration or on the command line with `--seed`) to run # using the same seed while you debug the issue. # # We recommend, actually, that you use the command line approach so you # don't accidentally leave the seed encoded. define_reader :seed # When a block passed to pending fails (as expected), display the failure # without reporting it as a failure (default: false). add_setting :show_failures_in_pending_blocks # Convert symbols to hashes with the symbol as a key with a value of # `true` (default: false). # # This allows you to tag a group or example like this: # # describe "something slow", :slow do # # ... # end # # ... instead of having to type: # # describe "something slow", :slow => true do # # ... # end add_setting :treat_symbols_as_metadata_keys_with_true_values # @private add_setting :tty # @private add_setting :include_or_extend_modules # @private add_setting :files_to_run # @private add_setting :expecting_with_rspec # @private attr_accessor :filter_manager DEFAULT_BACKTRACE_PATTERNS = [ /\/lib\d*\/ruby\//, /org\/jruby\//, /bin\//, /gems/, /spec\/spec_helper\.rb/, /lib\/rspec\/(core|expectations|matchers|mocks)/ ] def initialize @expectation_frameworks = [] @include_or_extend_modules = [] @mock_framework = nil @files_to_run = [] @formatters = [] @color = false @pattern = '**/*_spec.rb' @failure_exit_code = 1 @backtrace_clean_patterns = DEFAULT_BACKTRACE_PATTERNS.dup @default_path = 'spec' @filter_manager = FilterManager.new @preferred_options = {} @seed = srand % 0xFFFF end # @private # # Used to set higher priority option values from the command line. def force(hash) if hash.has_key?(:seed) hash[:order], hash[:seed] = order_and_seed_from_seed(hash[:seed]) elsif hash.has_key?(:order) set_order_and_seed(hash) end @preferred_options.merge!(hash) end # @private def reset @reporter = nil @formatters.clear end # @overload add_setting(name) # @overload add_setting(name, opts) # @option opts [Symbol] :default # # set a default value for the generated getter and predicate methods: # # add_setting(:foo, :default => "default value") # # @option opts [Symbol] :alias_with # # Use `:alias_with` to alias the setter, getter, and predicate to another # name, or names: # # add_setting(:foo, :alias_with => :bar) # add_setting(:foo, :alias_with => [:bar, :baz]) # # Adds a custom setting to the RSpec.configuration object. # # RSpec.configuration.add_setting :foo # # Used internally and by extension frameworks like rspec-rails, so they # can add config settings that are domain specific. 
For example: # # RSpec.configure do |c| # c.add_setting :use_transactional_fixtures, # :default => true, # :alias_with => :use_transactional_examples # end # # `add_setting` creates three methods on the configuration object, a # setter, a getter, and a predicate: # # RSpec.configuration.foo=(value) # RSpec.configuration.foo # RSpec.configuration.foo? # returns true if foo returns anything but nil or false def add_setting(name, opts={}) default = opts.delete(:default) (class << self; self; end).class_eval do add_setting(name, opts) end send("#{name}=", default) if default end # Used by formatters to ask whether a backtrace line should be displayed # or not, based on the line matching any `backtrace_clean_patterns`. def cleaned_from_backtrace?(line) # TODO (David 2011-12-25) why are we asking the configuration to do # stuff? Either use the patterns directly or enapsulate the filtering # in a BacktraceCleaner object. backtrace_clean_patterns.any? { |regex| line =~ regex } end # Returns the configured mock framework adapter module def mock_framework mock_with :rspec unless @mock_framework @mock_framework end # Delegates to mock_framework=(framework) def mock_framework=(framework) mock_with framework end # Sets the mock framework adapter module. # # `framework` can be a Symbol or a Module. # # Given any of `:rspec`, `:mocha`, `:flexmock`, or `:rr`, configures the # named framework. # # Given `:nothing`, configures no framework. Use this if you don't use # any mocking framework to save a little bit of overhead. # # Given a Module, includes that module in every example group. The module # should adhere to RSpec's mock framework adapter API: # # setup_mocks_for_rspec # - called before each example # # verify_mocks_for_rspec # - called after each example. Framework should raise an exception # when expectations fail # # teardown_mocks_for_rspec # - called after verify_mocks_for_rspec (even if there are errors) # # If the module responds to `configuration` and `mock_with` receives a block, # it will yield the configuration object to the block e.g. # # config.mock_with OtherMockFrameworkAdapter do |mod_config| # mod_config.custom_setting = true # end def mock_with(framework) framework_module = case framework when Module framework when String, Symbol require case framework.to_s when /rspec/i 'rspec/core/mocking/with_rspec' when /mocha/i 'rspec/core/mocking/with_mocha' when /rr/i 'rspec/core/mocking/with_rr' when /flexmock/i 'rspec/core/mocking/with_flexmock' else 'rspec/core/mocking/with_absolutely_nothing' end RSpec::Core::MockFrameworkAdapter end new_name, old_name = [framework_module, @mock_framework].map do |mod| mod.respond_to?(:framework_name) ? mod.framework_name : :unnamed end unless new_name == old_name assert_no_example_groups_defined(:mock_framework) end if block_given? raise "#{framework_module} must respond to `configuration` so that mock_with can yield it." unless framework_module.respond_to?(:configuration) yield framework_module.configuration end @mock_framework = framework_module end # Returns the configured expectation framework adapter module(s) def expectation_frameworks expect_with :rspec if @expectation_frameworks.empty? @expectation_frameworks end # Delegates to expect_with(framework) def expectation_framework=(framework) expect_with(framework) end # Sets the expectation framework module(s) to be included in each example # group. 
# # `frameworks` can be `:rspec`, `:stdlib`, a custom module, or any # combination thereof: # # config.expect_with :rspec # config.expect_with :stdlib # config.expect_with :rspec, :stdlib # config.expect_with OtherExpectationFramework # # RSpec will translate `:rspec` and `:stdlib` into the appropriate # modules. # # ## Configuration # # If the module responds to `configuration`, `expect_with` will # yield the `configuration` object if given a block: # # config.expect_with OtherExpectationFramework do |custom_config| # custom_config.custom_setting = true # end def expect_with(*frameworks) modules = frameworks.map do |framework| case framework when Module framework when :rspec require 'rspec/expectations' self.expecting_with_rspec = true ::RSpec::Matchers when :stdlib require 'test/unit/assertions' ::Test::Unit::Assertions else raise ArgumentError, "#{framework.inspect} is not supported" end end if (modules - @expectation_frameworks).any? assert_no_example_groups_defined(:expect_with) end if block_given? raise "expect_with only accepts a block with a single argument. Call expect_with #{modules.length} times, once with each argument, instead." if modules.length > 1 raise "#{modules.first} must respond to `configuration` so that expect_with can yield it." unless modules.first.respond_to?(:configuration) yield modules.first.configuration end @expectation_frameworks.push(*modules) end def full_backtrace=(true_or_false) @backtrace_clean_patterns = true_or_false ? [] : DEFAULT_BACKTRACE_PATTERNS end def color(output=output_stream) # rspec's built-in formatters all call this with the output argument, # but defaulting to output_stream for backward compatibility with # formatters in extension libs return false unless output_to_tty?(output) value_for(:color, @color) end def color=(bool) if bool if RSpec.windows_os? and not ENV['ANSICON'] warn "You must use ANSICON 1.31 or later (http://adoxa.3eeweb.com/ansicon/) to use colour on Windows" @color = false else @color = true end end end # TODO - deprecate color_enabled - probably not until the last 2.x # release before 3.0 alias_method :color_enabled, :color alias_method :color_enabled=, :color= define_predicate_for :color_enabled, :color def libs=(libs) libs.map {|lib| $LOAD_PATH.unshift lib} end def requires=(paths) paths.map {|path| require path} end def debug=(bool) return unless bool begin require 'ruby-debug' Debugger.start rescue LoadError => e raise <<-EOM #{'*'*50} #{e.message} If you have it installed as a ruby gem, then you need to either require 'rubygems' or configure the RUBYOPT environment variable with the value 'rubygems'. #{e.backtrace.join("\n")} #{'*'*50} EOM end end # Run examples defined on `line_numbers` in all files to run. def line_numbers=(line_numbers) filter_run :line_numbers => line_numbers.map{|l| l.to_i} end def full_description=(description) filter_run :full_description => Regexp.union(*Array(description).map {|d| Regexp.new(d) }) end # @overload add_formatter(formatter) # # Adds a formatter to the formatters collection. `formatter` can be a # string representing any of the built-in formatters (see # `built_in_formatter`), or a custom formatter class. # # ### Note # # For internal purposes, `add_formatter` also accepts the name of a class # and path to a file that contains that class definition, but you should # consider that a private api that may change at any time without notice. 
def add_formatter(formatter_to_use, path=nil) formatter_class = built_in_formatter(formatter_to_use) || custom_formatter(formatter_to_use) || (raise ArgumentError, "Formatter '#{formatter_to_use}' unknown - maybe you meant 'documentation' or 'progress'?.") formatters << formatter_class.new(path ? file_at(path) : output) end alias_method :formatter=, :add_formatter def formatters @formatters ||= [] end def reporter @reporter ||= begin add_formatter('progress') if formatters.empty? Reporter.new(*formatters) end end # @private def files_or_directories_to_run=(*files) files = files.flatten files << default_path if (command == 'rspec' || Runner.running_in_drb?) && default_path && files.empty? self.files_to_run = get_files_to_run(files) end # Creates a method that delegates to `example` including the submitted # `args`. Used internally to add variants of `example` like `pending`: # # @example # alias_example_to :pending, :pending => true # # # This lets you do this: # # describe Thing do # pending "does something" do # thing = Thing.new # end # end # # # ... which is the equivalent of # # describe Thing do # it "does something", :pending => true do # thing = Thing.new # end # end def alias_example_to(new_name, *args) extra_options = build_metadata_hash_from(args) RSpec::Core::ExampleGroup.alias_example_to(new_name, extra_options) end # Define an alias for it_should_behave_like that allows different # language (like "it_has_behavior" or "it_behaves_like") to be # employed when including shared examples. # # Example: # # alias_it_behaves_like_to(:it_has_behavior, 'has behavior:') # # allows the user to include a shared example group like: # # describe Entity do # it_has_behavior 'sortability' do # let(:sortable) { Entity.new } # end # end # # which is reported in the output as: # # Entity # has behavior: sortability # # sortability examples here def alias_it_behaves_like_to(new_name, report_label = '') RSpec::Core::ExampleGroup.alias_it_behaves_like_to(new_name, report_label) end alias_method :alias_it_should_behave_like_to, :alias_it_behaves_like_to # Adds key/value pairs to the `inclusion_filter`. If the # `treat_symbols_as_metadata_keys_with_true_values` config option is set # to true and `args` includes any symbols that are not part of a hash, # each symbol is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # given this declaration # describe "something", :foo => 'bar' do # # ... # end # # # any of the following will include that group # config.filter_run_including :foo => 'bar' # config.filter_run_including :foo => /^ba/ # config.filter_run_including :foo => lambda {|v| v == 'bar'} # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g. # config.filter_run_including :foo => lambda {|v| v == 'bar'} # # # given a proc with an arity of 2, the lambda is passed the value related to the key, # # and the metadata itself e.g. # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # # with treat_symbols_as_metadata_keys_with_true_values = true # filter_run_including :foo # same as filter_run_including :foo => true def filter_run_including(*args) filter_manager.include_with_low_priority build_metadata_hash_from(args) end alias_method :filter_run, :filter_run_including # Clears and reassigns the `inclusion_filter`. 
Set to `nil` if you don't # want any inclusion filter at all. # # ### Warning # # This overrides any inclusion filters/tags set on the command line or in # configuration files. def inclusion_filter=(filter) filter_manager.include! build_metadata_hash_from([filter]) end alias_method :filter=, :inclusion_filter= # Returns the `inclusion_filter`. If none has been set, returns an empty # hash. def inclusion_filter filter_manager.inclusions end alias_method :filter, :inclusion_filter # Adds key/value pairs to the `exclusion_filter`. If the # `treat_symbols_as_metadata_keys_with_true_values` config option is set # to true and `args` excludes any symbols that are not part of a hash, # each symbol is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # given this declaration # describe "something", :foo => 'bar' do # # ... # end # # # any of the following will exclude that group # config.filter_run_excluding :foo => 'bar' # config.filter_run_excluding :foo => /^ba/ # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g. # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # # # given a proc with an arity of 2, the lambda is passed the value related to the key, # # and the metadata itself e.g. # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # # with treat_symbols_as_metadata_keys_with_true_values = true # filter_run_excluding :foo # same as filter_run_excluding :foo => true def filter_run_excluding(*args) filter_manager.exclude_with_low_priority build_metadata_hash_from(args) end # Clears and reassigns the `exclusion_filter`. Set to `nil` if you don't # want any exclusion filter at all. # # ### Warning # # This overrides any exclusion filters/tags set on the command line or in # configuration files. def exclusion_filter=(filter) filter_manager.exclude! build_metadata_hash_from([filter]) end # Returns the `exclusion_filter`. If none has been set, returns an empty # hash. def exclusion_filter filter_manager.exclusions end # Tells RSpec to include `mod` in example groups. Methods defined in # `mod` are exposed to examples (not example groups). Use `filters` to # constrain the groups in which to include the module. # # @example # # module AuthenticationHelpers # def login_as(user) # # ... # end # end # # module UserHelpers # def users(username) # # ... # end # end # # RSpec.configure do |config| # config.include(UserHelpers) # included in all modules # config.include(AuthenticationHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # it "can be viewed by owning user" do # login_as users(:jdoe) # get "/profiles/jdoe" # assert_select ".username", :text => 'jdoe' # end # end # # @see #extend def include(mod, *filters) include_or_extend_modules << [:include, mod, build_metadata_hash_from(filters)] end # Tells RSpec to extend example groups with `mod`. Methods defined in # `mod` are exposed to example groups (not examples). Use `filters` to # constrain the groups to extend. # # Similar to `include`, but behavior is added to example groups, which # are classes, rather than the examples, which are instances of those # classes. # # @example # # module UiHelpers # def run_in_browser # # ... 
# end # end # # RSpec.configure do |config| # config.extend(UiHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # run_in_browser # # it "does stuff in the client" do # # ... # end # end # # @see #include def extend(mod, *filters) include_or_extend_modules << [:extend, mod, build_metadata_hash_from(filters)] end # @private # # Used internally to extend a group with modules using `include` and/or # `extend`. def configure_group(group) include_or_extend_modules.each do |include_or_extend, mod, filters| next unless filters.empty? || group.any_apply?(filters) send("safe_#{include_or_extend}", mod, group) end end # @private def safe_include(mod, host) host.send(:include,mod) unless host < mod end # @private if RUBY_VERSION.to_f >= 1.9 def safe_extend(mod, host) host.extend(mod) unless (class << host; self; end) < mod end else def safe_extend(mod, host) host.extend(mod) unless (class << host; self; end).included_modules.include?(mod) end end # @private def configure_mock_framework RSpec::Core::ExampleGroup.send(:include, mock_framework) end # @private def configure_expectation_framework expectation_frameworks.each do |framework| RSpec::Core::ExampleGroup.send(:include, framework) end end # @private def load_spec_files files_to_run.uniq.each {|f| load File.expand_path(f) } raise_if_rspec_1_is_loaded end # @private DEFAULT_FORMATTER = lambda { |string| string } # Formats the docstring output using the block provided. # # @example # # This will strip the descriptions of both examples and example groups. # RSpec.configure do |config| # config.format_docstrings { |s| s.strip } # end def format_docstrings(&block) @format_docstrings_block = block_given? ? block : DEFAULT_FORMATTER end # @private def format_docstrings_block @format_docstrings_block ||= DEFAULT_FORMATTER end # @api # # Sets the seed value and sets `order='rand'` def seed=(seed) order_and_seed_from_seed(seed) end # @api # # Sets the order and, if order is `'rand:<seed>'`, also sets the seed. def order=(type) order_and_seed_from_order(type) end def randomize? order.to_s.match(/rand/) end # @private DEFAULT_ORDERING = lambda { |list| list } # @private RANDOM_ORDERING = lambda do |list| Kernel.srand RSpec.configuration.seed list.sort_by { Kernel.rand(list.size) } end # Sets a strategy by which to order examples. # # @example # RSpec.configure do |config| # config.order_examples do |examples| # examples.reverse # end # end # # @see #order_groups # @see #order_groups_and_examples # @see #order= # @see #seed= def order_examples(&block) @example_ordering_block = block @order = "custom" unless built_in_orderer?(block) end # @private def example_ordering_block @example_ordering_block ||= DEFAULT_ORDERING end # Sets a strategy by which to order groups. # # @example # RSpec.configure do |config| # config.order_groups do |groups| # groups.reverse # end # end # # @see #order_examples # @see #order_groups_and_examples # @see #order= # @see #seed= def order_groups(&block) @group_ordering_block = block @order = "custom" unless built_in_orderer?(block) end # @private def group_ordering_block @group_ordering_block ||= DEFAULT_ORDERING end # Sets a strategy by which to order groups and examples. 
# # @example # RSpec.configure do |config| # config.order_groups_and_examples do |groups_or_examples| # groups_or_examples.reverse # end # end # # @see #order_groups # @see #order_examples # @see #order= # @see #seed= def order_groups_and_examples(&block) order_groups(&block) order_examples(&block) end private def get_files_to_run(paths) patterns = pattern.split(",") paths.map do |path| path = path.gsub(File::ALT_SEPARATOR, File::SEPARATOR) if File::ALT_SEPARATOR File.directory?(path) ? gather_directories(path, patterns) : extract_location(path) end.flatten.sort end def gather_directories(path, patterns) patterns.map do |pattern| pattern =~ /^#{path}/ ? Dir[pattern.strip].sort : Dir["#{path}/{#{pattern.strip}}"].sort end end def extract_location(path) if path =~ /^(.*?)((?:\:\d+)+)$/ path, lines = $1, $2[1..-1].split(":").map{|n| n.to_i} filter_manager.add_location path, lines end path end def command $0.split(File::SEPARATOR).last end def value_for(key, default=nil) @preferred_options.has_key?(key) ? @preferred_options[key] : default end def assert_no_example_groups_defined(config_option) if RSpec.world.example_groups.any? raise MustBeConfiguredBeforeExampleGroupsError.new( "RSpec's #{config_option} configuration option must be configured before " + "any example groups are defined, but you have already defined a group." ) end end def raise_if_rspec_1_is_loaded if defined?(Spec) && defined?(Spec::VERSION::MAJOR) && Spec::VERSION::MAJOR == 1 raise <<-MESSAGE #{'*'*80} You are running rspec-2, but it seems as though rspec-1 has been loaded as well. This is likely due to a statement like this somewhere in the specs: require 'spec' Please locate that statement, remove it, and try again. #{'*'*80} MESSAGE end end def output_to_tty?(output=output_stream) tty? || (output.respond_to?(:tty?) && output.tty?) end def built_in_formatter(key) case key.to_s when 'd', 'doc', 'documentation', 's', 'n', 'spec', 'nested' require 'rspec/core/formatters/documentation_formatter' RSpec::Core::Formatters::DocumentationFormatter when 'h', 'html' require 'rspec/core/formatters/html_formatter' RSpec::Core::Formatters::HtmlFormatter when 't', 'textmate' require 'rspec/core/formatters/text_mate_formatter' RSpec::Core::Formatters::TextMateFormatter when 'p', 'progress' require 'rspec/core/formatters/progress_formatter' RSpec::Core::Formatters::ProgressFormatter when 'j', 'json' require 'rspec/core/formatters/json_formatter' RSpec::Core::Formatters::JsonFormatter end end def custom_formatter(formatter_ref) if Class === formatter_ref formatter_ref elsif string_const?(formatter_ref) begin eval(formatter_ref) rescue NameError require path_for(formatter_ref) eval(formatter_ref) end end end def string_const?(str) str.is_a?(String) && /\A[A-Z][a-zA-Z0-9_:]*\z/ =~ str end def path_for(const_ref) underscore_with_fix_for_non_standard_rspec_naming(const_ref) end def underscore_with_fix_for_non_standard_rspec_naming(string) underscore(string).sub(%r{(^|/)r_spec($|/)}, '\\1rspec\\2') end # activesupport/lib/active_support/inflector/methods.rb, line 48 def underscore(camel_cased_word) word = camel_cased_word.to_s.dup word.gsub!(/::/, '/') word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2') word.gsub!(/([a-z\d])([A-Z])/,'\1_\2') word.tr!("-", "_") word.downcase! 
word end def file_at(path) FileUtils.mkdir_p(File.dirname(path)) File.new(path, 'w') end def order_and_seed_from_seed(value) order_groups_and_examples(&RANDOM_ORDERING) @order, @seed = 'rand', value.to_i [@order, @seed] end def set_order_and_seed(hash) hash[:order], seed = order_and_seed_from_order(hash[:order]) hash[:seed] = seed if seed end def order_and_seed_from_order(type) order, seed = type.to_s.split(':') @order = order @seed = seed = seed.to_i if seed if randomize? order_groups_and_examples(&RANDOM_ORDERING) elsif order == 'default' @order, @seed = nil, nil order_groups_and_examples(&DEFAULT_ORDERING) end return order, seed end def built_in_orderer?(block) [DEFAULT_ORDERING, RANDOM_ORDERING].include?(block) end end end end
1
8,209
I think we need some YARD docs here, particularly to list all the valid color symbols. Otherwise users will have to look at the source to discover them.
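A sketch of the kind of YARD annotation this comment asks for; the defaults shown and the set of accepted symbols are assumptions based on standard ANSI terminal colors, not something the diff establishes.

# Color to use for successful examples (default: `:green`). Accepts an
# ANSI color symbol such as `:black`, `:red`, `:green`, `:yellow`,
# `:blue`, `:magenta`, `:cyan`, or `:white`.
add_setting :success_color

# Color to use for pending examples (default: `:yellow`). Accepts the
# same set of symbols as `success_color`.
add_setting :pending_color

# Color to use for failed examples (default: `:red`). Accepts the same
# set of symbols as `success_color`.
add_setting :failure_color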
rspec-rspec-core
rb
@@ -31,6 +31,13 @@ import { STORE_NAME, AMP_MODE_PRIMARY, AMP_MODE_SECONDARY } from './constants'; const { createRegistrySelector } = Data; +function getSiteInfoProperty( propName ) { + return createRegistrySelector( ( select ) => () => { + const siteInfo = select( STORE_NAME ).getSiteInfo() || {}; + return siteInfo[ propName ]; + } ); +} + // Actions const RECEIVE_SITE_INFO = 'RECEIVE_SITE_INFO';
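Given the factory in this hunk, the store's repetitive one-property selectors (visible in the file below) could each collapse to a single line; this pairing is a sketch of the intended usage, not code from the patch.

// Hypothetical rewrite of three selectors from the store below using
// the getSiteInfoProperty() factory added in this hunk.
export const selectors = {
	getAMPMode: getSiteInfoProperty( 'ampMode' ),
	getHomeURL: getSiteInfoProperty( 'homeURL' ),
	getTimezone: getSiteInfoProperty( 'timezone' ),
};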
1
/** * core/site data store: site info. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import invariant from 'invariant'; import { addQueryArgs } from '@wordpress/url'; import queryString from 'query-string'; /** * Internal dependencies */ import Data from 'googlesitekit-data'; import { STORE_NAME, AMP_MODE_PRIMARY, AMP_MODE_SECONDARY } from './constants'; const { createRegistrySelector } = Data; // Actions const RECEIVE_SITE_INFO = 'RECEIVE_SITE_INFO'; export const INITIAL_STATE = { siteInfo: undefined, }; export const actions = { /** * Stores site info in the datastore. * * Because this is frequently-accessed data, this is usually sourced * from a global variable (`_googlesitekitSiteData`), set by PHP * in the `before_print` callback for `googlesitekit-datastore-site`. * * @since 1.7.0 * @private * * @param {Object} siteInfo Site info, usually supplied via a global variable from PHP. * @return {Object} Redux-style action. */ receiveSiteInfo( siteInfo ) { invariant( siteInfo, 'siteInfo is required.' ); return { payload: { siteInfo }, type: RECEIVE_SITE_INFO, }; }, }; export const controls = {}; export const reducer = ( state, { payload, type } ) => { switch ( type ) { case RECEIVE_SITE_INFO: { const { adminURL, ampMode, currentEntityURL, currentEntityID, currentEntityTitle, currentEntityType, homeURL, referenceSiteURL, timezone, usingProxy, siteName, } = payload.siteInfo; return { ...state, siteInfo: { adminURL, ampMode, currentEntityURL, currentEntityID: parseInt( currentEntityID, 10 ), currentEntityTitle, currentEntityType, homeURL, referenceSiteURL, timezone, usingProxy, siteName, }, }; } default: { return { ...state }; } } }; export const resolvers = { *getSiteInfo() { const registry = yield Data.commonActions.getRegistry(); if ( registry.select( STORE_NAME ).getSiteInfo() ) { return; } if ( ! global._googlesitekitBaseData || ! global._googlesitekitEntityData ) { global.console.error( 'Could not load core/site info.' ); return; } const { adminURL, ampMode, homeURL, referenceSiteURL, timezone, usingProxy, siteName, } = global._googlesitekitBaseData; const { currentEntityURL, currentEntityID, currentEntityTitle, currentEntityType, } = global._googlesitekitEntityData; yield actions.receiveSiteInfo( { adminURL, ampMode, currentEntityURL, currentEntityID, currentEntityTitle, currentEntityType, homeURL, referenceSiteURL, timezone, usingProxy: !! usingProxy, siteName, } ); }, }; export const selectors = { /** * Gets all site info from this data store. * * Not intended to be used publicly; this is largely here so other selectors can * request data using the selector/resolver pattern. * * @since 1.7.0 * @private * * @param {Object} state Data store's state. * @return {(Object|undefined)} Site connection info. */ getSiteInfo( state ) { return state.siteInfo; }, /** * Gets a site's admin URL. * * @since 1.7.0 * * @param {Object} state Data store's state. 
* @param {(string|undefined)} page Optional page query argument ( Simple format: 'test-page' or Full format: 'custom.php?page=test-page' ) to add to admin URL. If not provided, the base admin URL is returned. * @param {(Object|undefined)} args Optional additional query arguments to add to admin URL. * @return {(string|undefined)} This site's admin URL. */ getAdminURL: createRegistrySelector( ( select ) => ( state, page, args = {} ) => { const { adminURL } = select( STORE_NAME ).getSiteInfo() || {}; // Return adminURL if undefined, or if no page supplied. if ( adminURL === undefined || page === undefined ) { return adminURL; } const baseURL = ( adminURL[ adminURL.length - 1 ] === '/' ) ? adminURL : `${ adminURL }/`; let pageArg = page; let phpFile = 'admin.php'; // If page argument is full format (i.e. 'admin.php?page=google-site-kit'), extract php file and pageArg, returning early with adminURL if no 'page' param found. if ( page.indexOf( '.php?' ) !== -1 ) { const splitPage = page.split( '?' ); pageArg = queryString.parse( splitPage.pop() ).page; if ( ! pageArg ) { return adminURL; } phpFile = splitPage.shift(); } // Since page should be first query arg, create queryArgs without 'page' to prevent a 'page' in args from overriding it. const { page: extraPage, ...queryArgs } = args; // eslint-disable-line no-unused-vars return addQueryArgs( `${ baseURL }${ phpFile }`, { page: pageArg, ...queryArgs, } ); } ), /** * Gets a site's AMP mode. * * @since 1.7.0 * * @param {Object} state Data store's state. * @return {(string|undefined)} AMP Mode. */ getAMPMode: createRegistrySelector( ( select ) => () => { const { ampMode } = select( STORE_NAME ).getSiteInfo() || {}; return ampMode; } ), /** * Gets the current entity's ID. * * @since 1.7.0 * * @param {Object} state Data store's state. * @return {(number|null|undefined)} Current entity's ID, null if there is * none, undefined if not loaded yet. */ getCurrentEntityID: createRegistrySelector( ( select ) => () => { const { currentEntityID } = select( STORE_NAME ).getSiteInfo() || {}; return currentEntityID; } ), /** * Gets the current entity's title. * * @since 1.7.0 * * @param {Object} state Data store's state. * @return {(string|null|undefined)} Current entity's title, null if there * is none, undefined if not loaded yet. */ getCurrentEntityTitle: createRegistrySelector( ( select ) => () => { const { currentEntityTitle } = select( STORE_NAME ).getSiteInfo() || {}; return currentEntityTitle; } ), /** * Gets the current entity's title. * * @since 1.7.0 * * @param {Object} state Data store's state. * @return {(string|null|undefined)} Current entity's type, null if there * is none, undefined if not loaded yet. */ getCurrentEntityType: createRegistrySelector( ( select ) => () => { const { currentEntityType } = select( STORE_NAME ).getSiteInfo() || {}; return currentEntityType; } ), /** * Gets the current entity's reference URL. * * @since 1.7.0 * * @param {Object} state Data store's state. * @return {(string|null|undefined)} Current entity's URL, null if there is * none, undefined if not loaded yet. */ getCurrentEntityURL: createRegistrySelector( ( select ) => () => { const { currentEntityURL } = select( STORE_NAME ).getSiteInfo() || {}; return currentEntityURL; } ), /** * Gets a site's homepage URL. * * @since 1.7.0 * * @param {Object} state Data store's state. * @return {(string|undefined)} This site's home URL. 
*/ getHomeURL: createRegistrySelector( ( select ) => () => { const { homeURL } = select( STORE_NAME ).getSiteInfo() || {}; return homeURL; } ), /** * Gets a site's reference site URL. * * @since 1.7.0 * * @param {Object} state Data store's state. * @return {(string|undefined)} The reference site URL. */ getReferenceSiteURL: createRegistrySelector( ( select ) => () => { const { referenceSiteURL } = select( STORE_NAME ).getSiteInfo() || {}; return referenceSiteURL; } ), /** * Gets the current reference URL to use. * * This selector should be used to get the contextual URL for requesting * URL-specific data from Google APIs. * * If a current entity exists, this will return the same value as the * `getCurrentEntityURL` selector. Otherwise it will fall back to returning * the same value as the `getReferenceSiteURL` selector. * * @since 1.10.0 * * @param {Object} state Data store's state. * @return {(string|undefined)} The current reference URL, or undefined if * not loaded yet. */ getCurrentReferenceURL: createRegistrySelector( ( select ) => () => { // Use current entity URL if present or still loading. const currentEntityURL = select( STORE_NAME ).getCurrentEntityURL(); if ( currentEntityURL !== null ) { return currentEntityURL; } // Otherwise fall back to reference site URL. return select( STORE_NAME ).getReferenceSiteURL(); } ), /** * Returns true if this site supports AMP. * * @since 1.7.0 * @since 1.11.0 Renamed from isAmp to isAMP. * * @param {Object} state Data store's state. * @return {(string|undefined)} `true` if AMP support is enabled, `false` if not. Returns `undefined` if not loaded. */ isAMP: createRegistrySelector( ( select ) => () => { const ampMode = select( STORE_NAME ).getAMPMode(); if ( ampMode === undefined ) { return undefined; } return !! ampMode; } ), /** * Checks if the site is in the primary AMP mode. * * @since 1.12.0 * * @return {(boolean|undefined)} `true` or `false` if the site is in the primary AMP mode. Returns `undefined` if not loaded. */ isPrimaryAMP: createRegistrySelector( ( select ) => () => { const ampMode = select( STORE_NAME ).getAMPMode(); if ( ampMode === undefined ) { return undefined; } return ampMode === AMP_MODE_PRIMARY; } ), /** * Checks if the site is in a secondary AMP mode. * * @since 1.12.0 * * @return {(boolean|undefined)} `true` or `false` if the site is in a secondary AMP mode. Returns `undefined` if not loaded. */ isSecondaryAMP: createRegistrySelector( ( select ) => () => { const ampMode = select( STORE_NAME ).getAMPMode(); if ( ampMode === undefined ) { return undefined; } return ampMode === AMP_MODE_SECONDARY; } ), /** * Gets a site's timezone. * * @since 1.9.0 * * @param {Object} state Data store's state. * @return {(string|undefined)} The timezone. */ getTimezone: createRegistrySelector( ( select ) => () => { const { timezone } = select( STORE_NAME ).getSiteInfo() || {}; return timezone; } ), /** * Returns true if this site is using the proxy service. * * @since 1.9.0 * * @param {Object} state Data store's state. * @return {(boolean|undefined)} `true` if the proxy service is in use, `false` if not. Returns `undefined` if not loaded. */ isUsingProxy: createRegistrySelector( ( select ) => () => { const { usingProxy } = select( STORE_NAME ).getSiteInfo() || {}; return usingProxy; } ), /** * Gets a site's name. * * @since 1.9.0 * * @param {Object} state Data store's state. * @return {(string|undefined)} The site name. 
*/ getSiteName: createRegistrySelector( ( select ) => () => { const { siteName } = select( STORE_NAME ).getSiteInfo() || {}; return siteName; } ), }; export default { INITIAL_STATE, actions, controls, reducer, resolvers, selectors, };
1
30,844
Not sure if this needs a doc block or not since it's completely internal, but thanks for cleaning this up. (@felixarntz: docs?)
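If a doc block were added after all, it might look like the following; the function body is copied from the patch above, while the JSDoc wording and the `@since` placeholder are assumptions.

/**
 * Creates a registry selector that reads a single site info property.
 *
 * @since n.e.x.t
 * @private
 *
 * @param {string} propName Property to read from the `siteInfo` state.
 * @return {Function} Registry selector returning the property value, or
 *                    undefined if site info has not loaded yet.
 */
function getSiteInfoProperty( propName ) {
	return createRegistrySelector( ( select ) => () => {
		const siteInfo = select( STORE_NAME ).getSiteInfo() || {};
		return siteInfo[ propName ];
	} );
}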
google-site-kit-wp
js
@@ -616,7 +616,8 @@ libponyc.benchmarks.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE) libgbenchmark.buildoptions := \ -Wshadow -pedantic -pedantic-errors \ - -Wfloat-equal -fstrict-aliasing -Wstrict-aliasing -Wno-invalid-offsetof \ + -fstrict-aliasing -Wstrict-aliasing \ + -Wno-invalid-offsetof -Wno-deprecated-declarations \ -DHAVE_POSIX_REGEX -DHAVE_STD_REGEX -DHAVE_STEADY_CLOCK ifneq ($(ALPINE),)
1
# Determine the operating system OSTYPE ?= ifeq ($(OS),Windows_NT) OSTYPE = windows else UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Linux) OSTYPE = linux ifndef AR ifneq (,$(shell which gcc-ar 2> /dev/null)) AR = gcc-ar endif endif ALPINE=$(wildcard /etc/alpine-release) endif ifeq ($(UNAME_S),Darwin) OSTYPE = osx endif ifeq ($(UNAME_S),FreeBSD) OSTYPE = bsd CXX = c++ endif ifeq ($(UNAME_S),DragonFly) OSTYPE = bsd CXX = c++ endif ifeq ($(UNAME_S),OpenBSD) OSTYPE = bsd CXX = c++ LLVM_CONFIG = /usr/local/bin/llvm-config default_pic = true endif endif # By default, CC is cc and CXX is g++ # So if you use standard alternatives on many Linuxes # You can get clang and g++ and then bad things will happen ifneq (,$(shell $(CC) --version 2>&1 | grep clang)) ifneq (,$(shell $(CXX) --version 2>&1 | grep "Free Software Foundation")) CXX = c++ endif ifneq (,$(shell $(CXX) --version 2>&1 | grep "Free Software Foundation")) $(error CC is clang but CXX is g++. They must be from matching compilers.) endif else ifneq (,$(shell $(CC) --version 2>&1 | grep "Free Software Foundation")) ifneq (,$(shell $(CXX) --version 2>&1 | grep clang)) CXX = c++ endif ifneq (,$(shell $(CXX) --version 2>&1 | grep clang)) $(error CC is gcc but CXX is clang++. They must be from matching compilers.) endif endif ifdef LTO_PLUGIN lto := yes endif # Default settings (silent release build). config ?= release arch ?= native tune ?= generic cpu ?= $(arch) fpu ?= bits ?= $(shell getconf LONG_BIT) # linking strategy (static, llvm-static, llvm-dynamic) # static links everything statically (only works with musl libc) # llvm-static links llvm statically # llvm-dynamic links llvm dynamically link ?= llvm-static ifndef verbose SILENT = @ else SILENT = endif # Default to version from `VERSION` file but allowing overridding on the # make command line like: # make version="nightly-19710702" # overridden version *should not* contain spaces or characters that aren't # legal in filesystem path names ifndef version version := $(shell cat VERSION) ifneq ($(wildcard .git),) sha := $(shell git rev-parse --short HEAD) tag := $(version)-$(sha) else tag := $(version) endif else tag := $(version) endif version_str = "$(tag) [$(config)]\ncompiled with: llvm $(llvm_version) \ -- "$(compiler_version) # package_name, _version, and _iteration can be overridden by Travis or AppVeyor package_base_version ?= $(tag) package_iteration ?= "1" package_name ?= "ponyc" package_version = $(package_base_version)-$(package_iteration) archive = $(package_name)-$(package_version).tar package = build/$(package_name)-$(package_version) prefix ?= /usr/local bindir ?= $(prefix)/bin includedir ?= $(prefix)/include libdir ?= $(prefix)/lib # destdir is for backward compatibility only, use ponydir instead. ifdef destdir $(warning Please use ponydir instead of destdir.) 
ponydir ?= $(destdir) endif ponydir ?= $(libdir)/pony/$(tag) symlink := yes ifdef ponydir ifndef prefix symlink := no endif endif ifneq (,$(filter $(OSTYPE), osx bsd)) symlink.flags = -sf else symlink.flags = -srf endif ifneq (,$(filter $(OSTYPE), osx bsd)) SED_INPLACE = sed -i -e else SED_INPLACE = sed -i endif LIB_EXT ?= a BUILD_FLAGS = -march=$(arch) -mtune=$(tune) -Werror -Wconversion \ -Wno-sign-conversion -Wextra -Wall LINKER_FLAGS = -march=$(arch) -mtune=$(tune) $(LDFLAGS) AR_FLAGS ?= rcs ALL_CFLAGS = -std=gnu11 -fexceptions \ -DPONY_VERSION=\"$(tag)\" -DLLVM_VERSION=\"$(llvm_version)\" \ -DPONY_COMPILER=\"$(CC)\" -DPONY_ARCH=\"$(arch)\" \ -DBUILD_COMPILER=\"$(compiler_version)\" \ -DPONY_BUILD_CONFIG=\"$(config)\" \ -DPONY_VERSION_STR=\"$(version_str)\" \ -D_FILE_OFFSET_BITS=64 ALL_CXXFLAGS = -std=gnu++11 -fno-rtti LL_FLAGS = -mcpu=$(cpu) # Determine pointer size in bits. BITS := $(bits) UNAME_M := $(shell uname -m) ifeq ($(BITS),64) ifeq ($(UNAME_M),x86_64) ifeq (,$(filter $(arch), armv8-a)) BUILD_FLAGS += -mcx16 LINKER_FLAGS += -mcx16 endif endif endif ifneq ($(fpu),) BUILD_FLAGS += -mfpu=$(fpu) LINKER_FLAGS += -mfpu=$(fpu) endif ifdef link ifeq (,$(filter $(link),static llvm-static llvm-dynamic)) $(error Unknown linking strategy "$(link)") endif endif ifneq (,$(filter $(link),static llvm-static)) LLVM_LINKING =--link-static LINKER_FLAGS += -static-libstdc++ ifneq (,$(shell $(CC) -v 2>&1 | grep gcc)) LINKER_FLAGS += -static-libgcc endif $(info "linking llvm statically") else ifeq ($(link),llvm-dynamic) ifneq ($(LLVM_VENDOR),true) LLVM_LINKING =--link-shared $(info "linking llvm dynamically") else $(error "Can't use llvm-dynamic with vendored LLVM at this time") endif endif PONY_BUILD_DIR ?= build/$(config) PONY_SOURCE_DIR ?= src PONY_TEST_DIR ?= test PONY_BENCHMARK_DIR ?= benchmark comma:= , empty:= space:= $(empty) $(empty) define USE_CHECK $$(info Enabling use option: $1) ifeq ($1,valgrind) ALL_CFLAGS += -DUSE_VALGRIND PONY_BUILD_DIR := $(PONY_BUILD_DIR)-valgrind else ifeq ($1,thread_sanitizer) ALL_CFLAGS += -fsanitize=thread -DPONY_SANITIZER=\"thread\" ALL_CXXFLAGS += -fsanitize=thread -DPONY_SANITIZER=\"thread\" LINKER_FLAGS += -fsanitize=thread -DPONY_SANITIZER=\"thread\" PONY_BUILD_DIR := $(PONY_BUILD_DIR)-thread_sanitizer else ifeq ($1,address_sanitizer) ALL_CFLAGS += -fsanitize=address -DPONY_SANITIZER=\"address\" ALL_CXXFLAGS += -fsanitize=address -DPONY_SANITIZER=\"address\" LINKER_FLAGS += -fsanitize=address -DPONY_SANITIZER=\"address\" PONY_BUILD_DIR := $(PONY_BUILD_DIR)-address_sanitizer else ifeq ($1,undefined_behavior_sanitizer) ALL_CFLAGS += -fsanitize=undefined -DPONY_SANITIZER=\"undefined\" ALL_CXXFLAGS += -fsanitize=undefined -DPONY_SANITIZER=\"undefined\" LINKER_FLAGS += -fsanitize=undefined -DPONY_SANITIZER=\"undefined\" PONY_BUILD_DIR := $(PONY_BUILD_DIR)-undefined_behavior_sanitizer else ifeq ($1,coverage) ifneq (,$(shell $(CC) -v 2>&1 | grep clang)) # clang COVERAGE_FLAGS = -O0 -fprofile-instr-generate -fcoverage-mapping LINKER_FLAGS += -fprofile-instr-generate -fcoverage-mapping else ifneq (,$(shell $(CC) -v 2>&1 | grep "gcc version")) # gcc COVERAGE_FLAGS = -O0 -fprofile-arcs -ftest-coverage LINKER_FLAGS += -fprofile-arcs else $$(error coverage not supported for this compiler/platform) endif ALL_CFLAGS += $(COVERAGE_FLAGS) ALL_CXXFLAGS += $(COVERAGE_FLAGS) endif PONY_BUILD_DIR := $(PONY_BUILD_DIR)-coverage else ifeq ($1,pooltrack) ALL_CFLAGS += -DUSE_POOLTRACK PONY_BUILD_DIR := $(PONY_BUILD_DIR)-pooltrack else ifeq ($1,dtrace) DTRACE ?= $(shell which 
dtrace) ifeq (, $$(DTRACE)) $$(error No dtrace compatible user application static probe generation tool found) endif ALL_CFLAGS += -DUSE_DYNAMIC_TRACE PONY_BUILD_DIR := $(PONY_BUILD_DIR)-dtrace else ifeq ($1,actor_continuations) ALL_CFLAGS += -DUSE_ACTOR_CONTINUATIONS PONY_BUILD_DIR := $(PONY_BUILD_DIR)-actor_continuations else ifeq ($1,scheduler_scaling_pthreads) ALL_CFLAGS += -DUSE_SCHEDULER_SCALING_PTHREADS PONY_BUILD_DIR := $(PONY_BUILD_DIR)-scheduler_scaling_pthreads else ifeq ($1,memtrack) ALL_CFLAGS += -DUSE_MEMTRACK ALL_CXXFLAGS += -DUSE_MEMTRACK PONY_BUILD_DIR := $(PONY_BUILD_DIR)-memtrack else ifeq ($1,memtrack_messages) ALL_CFLAGS += -DUSE_MEMTRACK -DUSE_MEMTRACK_MESSAGES ALL_CXXFLAGS += -DUSE_MEMTRACK -DUSE_MEMTRACK_MESSAGES PONY_BUILD_DIR := $(PONY_BUILD_DIR)-memtrack-messages else $$(error ERROR: Unknown use option specified: $1) endif endef ifdef use $(foreach useitem,$(sort $(subst $(comma),$(space),$(use))),$(eval $(call USE_CHECK,$(useitem)))) endif ifdef config ifeq (,$(filter $(config),debug release)) $(error Unknown configuration "$(config)") endif endif ifeq ($(config),release) BUILD_FLAGS += -O3 -DNDEBUG LL_FLAGS += -O3 ifeq ($(lto),yes) BUILD_FLAGS += -flto -DPONY_USE_LTO LINKER_FLAGS += -flto ifdef LTO_PLUGIN AR_FLAGS += --plugin $(LTO_PLUGIN) endif ifneq (,$(filter $(OSTYPE),linux bsd)) LINKER_FLAGS += -fuse-linker-plugin -fuse-ld=gold endif endif else BUILD_FLAGS += -g -DDEBUG endif ifeq ($(OSTYPE),osx) ALL_CFLAGS += -mmacosx-version-min=10.12 -DUSE_SCHEDULER_SCALING_PTHREADS ALL_CXXFLAGS += -stdlib=libc++ -mmacosx-version-min=10.12 endif # If we are not cleaning we need LLVM_CONFIG ifneq ($(MAKECMDGOALS),clean) ifndef LLVM_CONFIG ifneq (,$(shell which llvm-config 2> /dev/null)) LLVM_CONFIG = llvm-config else $(error No LLVM installation found! Set LLVM_CONFIG environment variable \ to the `llvm-config` binary for your installation) endif else ifeq (,$(shell which $(LLVM_CONFIG) 2> /dev/null)) $(error LLVM config $(LLVM_CONFIG) not found! Set LLVM_CONFIG environment \ variable to a valid LLVM installation.) endif LLVM_BINDIR := $(shell $(LLVM_CONFIG) --bindir 2> /dev/null) LLVM_LINK := $(LLVM_BINDIR)/llvm-link LLVM_OPT := $(LLVM_BINDIR)/opt LLVM_LLC := $(LLVM_BINDIR)/llc LLVM_AS := $(LLVM_BINDIR)/llvm-as llvm_build_mode := $(shell $(LLVM_CONFIG) --build-mode) ifeq (Release,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_Release else ifeq (RelWithDebInfo,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_RelWithDebInfo else ifeq (MinSizeRel,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_MinSizeRel else ifeq (Debug,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_Debug else $(error "Unknown llvm build-mode of $(llvm_build_mode)", aborting) endif llvm_version := $(shell $(LLVM_CONFIG) --version) ifeq ($(OSTYPE),osx) ifneq (,$(shell which $(LLVM_BINDIR)/llvm-ar 2> /dev/null)) AR = $(LLVM_BINDIR)/llvm-ar AR_FLAGS := rcs else AR = /usr/bin/ar AR_FLAGS := -rcs endif endif ifeq ($(llvm_version),3.9.1) else ifeq ($(llvm_version),5.0.2) else ifeq ($(llvm_version),6.0.1) else ifeq ($(llvm_version),7.1.0) else $(warning WARNING: Unsupported LLVM version: $(llvm_version)) $(warning Please use LLVM 3.9.1, 5.0.2, 6.0.1, 7.1.0) endif # Third party, but prebuilt. 
Prebuilt libraries are defined as # (1) a name (stored in prebuilt) # (2) the linker flags necessary to link against the prebuilt libraries # (3) a list of include directories for a set of libraries # (4) a list of the libraries to link against llvm.libdir := $(CROSS_SYSROOT)$(subst -L,,$(shell $(LLVM_CONFIG) --ldflags $(LLVM_LINKING))) llvm.ldflags := -L$(llvm.libdir) #$(info llvm.libdir="$(llvm.libdir)") #$(info llvm.ldflags="$(llvm.ldflags)") # Get cflags using llvm-config llvm.get_cflags := $(LLVM_CONFIG) --cflags $(LLVM_LINKING) #$(info llvm.get_cflags="$(llvm.get_cflags)") llvm.cflags := $(shell sh -c "$(llvm.get_cflags)") #$(info llvm.cflags="$(llvm.cflags)") # Get include dirs using grep & sed to extract "-I<dir>" and "-isystem<dir>" entries # that can occur anywhere in the string and <dir> may have a leading spaces, but the # regex assumes a directory does NOT contain spaces. # Note: [:space:] is used for greater portability. llvm.get_include_dirs := echo '$(llvm.cflags)' | grep -oE -- '(^-I[[:space:]]*| -I[[:space:]]*|^-isystem[[:space:]]*| -isystem[[:space:]]*)[^[:space:]]+' | sed -E 's/^[[:space:]]*(-I[[:space:]]*|-isystem[[:space:]]*)//' #$(info llvm.get_include_dirs="$(llvm.get_include_dirs)") llvm.include_dirs := $(shell sh -c "$(llvm.get_include_dirs)") #$(info llvm.include_dirs="$(llvm.include_dirs)") # Get the compiler output of verbose "-v" and preprocess, "-E" parameters which # contains the search paths. verbose_preprocess_string := $(shell echo | $(CC) -v -E - 2>&1) #$(info verbose_preprocess_string="$(verbose_preprocess_string)") # We must escape any double quotes, ", and any hash, #, characters. quoteDblQuote := $(subst ",\",$(verbose_preprocess_string)) #$(info quoteDblQuote="$(quoteDblQuote)") quoted_verbose_preprocess_string := $(subst \#,\\\#,$(quoteDblQuote)) #$(info quoted_verbose_preprocess_string="$(quoted_verbose_preprocess_string)") # Now use a sed command line to extract the search paths from the # quoted verbose preprocess string get_search_paths := sed 's/\(.*\)search starts here:\(.*\)End of search list.\(.*\)/\2/' #$(info get_search_paths="$(get_search_paths)") search_paths := $(shell echo "$(quoted_verbose_preprocess_string)" | $(get_search_paths)) #$(info search_paths="$(search_paths)") # Note: $(search_paths) is padded with a space on front and back so # that when we iterate the ${inc_dir} variable is guaranteed to have # a space at the beginning and end making finding a match easy. If # there is no match we output the ${inc_dir}. loopit := \ for inc_dir in $(llvm.include_dirs); do \ if ! echo " $(search_paths) " | grep -q " $${inc_dir} "; then \ echo "-isystem $(CROSS_SYSROOT)$${inc_dir}"; \ fi \ done #$(info loopit="$(loopit)") llvm.include = $(shell $(loopit)) #$(info llvm.include="$(llvm.include)") llvm.libs := $(shell $(LLVM_CONFIG) --libs $(LLVM_LINKING)) $(shell $(LLVM_CONFIG) --system-libs $(LLVM_LINKING)) endif compiler_version := "$(shell $(CC) --version | sed -n 1p)" ifeq ($(runtime-bitcode),yes) ifeq (,$(shell $(CC) -v 2>&1 | grep clang)) $(error Compiling the runtime as a bitcode file requires clang) endif endif makefile_abs_path := $(realpath $(lastword $(MAKEFILE_LIST))) packages_abs_src := $(shell dirname $(makefile_abs_path))/packages $(shell mkdir -p $(PONY_BUILD_DIR)) $(info Building into $(PONY_BUILD_DIR)) lib := $(PONY_BUILD_DIR)/lib/$(arch) bin := $(PONY_BUILD_DIR) tests := $(PONY_BUILD_DIR) benchmarks := $(PONY_BUILD_DIR) obj := $(PONY_BUILD_DIR)/obj-$(arch) # Libraries. 
Defined as # (1) a name and output directory libponyc := $(lib) libponycc := $(lib) libponyrt := $(lib) ifeq ($(OSTYPE),linux) libponyrt-pic := $(lib) endif # Define special case rules for a targets source files. By default # this makefile assumes that a targets source files can be found # relative to a parent directory of the same name in $(PONY_SOURCE_DIR). # Note that it is possible to collect files and exceptions with # arbitrarily complex shell commands, as long as ':=' is used # for definition, instead of '='. ifneq ($(OSTYPE),windows) libponyc.except += src/libponyc/platform/signed.cc libponyc.except += src/libponyc/platform/unsigned.cc libponyc.except += src/libponyc/platform/vcvars.c endif # Handle platform specific code to avoid "no symbols" warnings. libponyrt.except = ifneq ($(OSTYPE),windows) libponyrt.except += src/libponyrt/asio/iocp.c libponyrt.except += src/libponyrt/lang/win_except.c endif ifneq ($(OSTYPE),linux) libponyrt.except += src/libponyrt/asio/epoll.c endif ifneq ($(OSTYPE),osx) ifneq ($(OSTYPE),bsd) libponyrt.except += src/libponyrt/asio/kqueue.c endif endif libponyrt.except += src/libponyrt/asio/sock.c libponyrt.except += src/libponyrt/dist/dist.c libponyrt.except += src/libponyrt/dist/proto.c ifeq ($(OSTYPE),linux) libponyrt-pic.dir := src/libponyrt libponyrt-pic.except := $(libponyrt.except) endif # Third party, but requires compilation. Defined as # (1) a name and output directory. # (2) a list of the source files to be compiled. libgtest := $(lib) libgtest.dir := lib/gtest libgtest.files := $(libgtest.dir)/gtest-all.cc libgbenchmark := $(lib) libgbenchmark.dir := lib/gbenchmark libgbenchmark.srcdir := $(libgbenchmark.dir)/src libblake2 := $(lib) libblake2.dir := lib/blake2 libblake2.files := $(libblake2.dir)/blake2b-ref.c # We don't add libponyrt here. It's a special case because it can be compiled # to LLVM bitcode. ifeq ($(OSTYPE), linux) libraries := libponyc libponyrt-pic libgtest libgbenchmark libblake2 else libraries := libponyc libgtest libgbenchmark libblake2 endif ifeq ($(OSTYPE), bsd) extra.bsd.libs = -lpthread -lexecinfo llvm.libs += $(extra.bsd.libs) endif prebuilt := llvm # Binaries. Defined as # (1) a name and output directory. ponyc := $(bin) binaries := ponyc # Tests suites are directly attached to the libraries they test. libponyc.tests := $(tests) libponyrt.tests := $(tests) tests := libponyc.tests libponyrt.tests # Benchmark suites are directly attached to the libraries they test. libponyc.benchmarks := $(benchmarks) libponyc.benchmarks.dir := benchmark/libponyc libponyc.benchmarks.srcdir := $(libponyc.benchmarks.dir) libponyrt.benchmarks := $(benchmarks) libponyrt.benchmarks.dir := benchmark/libponyrt libponyrt.benchmarks.srcdir := $(libponyrt.benchmarks.dir) benchmarks := libponyc.benchmarks libponyrt.benchmarks # Define include paths for targets if necessary. Note that these include paths # will automatically apply to the test suite of a target as well. 
libponyc.include := -I src/common/ -I src/libponyrt/ $(llvm.include) \ -isystem lib/blake2 libponycc.include := -I src/common/ $(llvm.include) libponyrt.include := -I src/common/ -I src/libponyrt/ libponyrt-pic.include := $(libponyrt.include) libponyc.tests.include := -I src/common/ -I src/libponyc/ -I src/libponyrt \ $(llvm.include) -isystem lib/gtest/ libponyrt.tests.include := -I src/common/ -I src/libponyrt/ -isystem lib/gtest/ libponyc.benchmarks.include := -I src/common/ -I src/libponyc/ \ $(llvm.include) -isystem lib/gbenchmark/include/ libponyrt.benchmarks.include := -I src/common/ -I src/libponyrt/ -isystem \ lib/gbenchmark/include/ ponyc.include := -I src/common/ -I src/libponyrt/ $(llvm.include) libgtest.include := -isystem lib/gtest/ libgbenchmark.include := -isystem lib/gbenchmark/include/ libblake2.include := -isystem lib/blake2/ ifneq (,$(filter $(OSTYPE), osx bsd)) libponyrt.include += -I $(CROSS_SYSROOT)/usr/local/include endif # target specific build options libponyrt.tests.linkoptions += -rdynamic ifneq ($(ALPINE),) libponyrt.tests.linkoptions += -lexecinfo endif # link statically ifeq ($(link),static) libponyrt.tests.linkoptions += -static endif libponyc.buildoptions = -D__STDC_CONSTANT_MACROS libponyc.buildoptions += -D__STDC_FORMAT_MACROS libponyc.buildoptions += -D__STDC_LIMIT_MACROS libponyc.buildoptions += -DPONY_ALWAYS_ASSERT libponyc.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE) libponyc.tests.buildoptions = -D__STDC_CONSTANT_MACROS libponyc.tests.buildoptions += -D__STDC_FORMAT_MACROS libponyc.tests.buildoptions += -D__STDC_LIMIT_MACROS libponyc.tests.buildoptions += -DPONY_ALWAYS_ASSERT libponyc.tests.buildoptions += -DPONY_PACKAGES_DIR=\"$(packages_abs_src)\" libponyc.tests.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE) libponyc.tests.linkoptions += -rdynamic ifneq ($(ALPINE),) libponyc.tests.linkoptions += -lexecinfo endif # link statically # TODO: uncomment when this is fixed #ifeq ($(link),static) # libponyc.tests.linkoptions += -static #endif libponyc.benchmarks.buildoptions = -D__STDC_CONSTANT_MACROS libponyc.benchmarks.buildoptions += -D__STDC_FORMAT_MACROS libponyc.benchmarks.buildoptions += -D__STDC_LIMIT_MACROS libponyc.benchmarks.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE) libgbenchmark.buildoptions := \ -Wshadow -pedantic -pedantic-errors \ -Wfloat-equal -fstrict-aliasing -Wstrict-aliasing -Wno-invalid-offsetof \ -DHAVE_POSIX_REGEX -DHAVE_STD_REGEX -DHAVE_STEADY_CLOCK ifneq ($(ALPINE),) libponyc.benchmarks.linkoptions += -lexecinfo libponyrt.benchmarks.linkoptions += -lexecinfo endif # link statically ifeq ($(link),static) libponyc.benchmarks.linkoptions += -static libponyrt.benchmarks.linkoptions += -static endif ponyc.buildoptions = $(libponyc.buildoptions) ponyc.linkoptions += -rdynamic ifneq ($(ALPINE),) ponyc.linkoptions += -lexecinfo BUILD_FLAGS += -DALPINE_LINUX endif # link statically ifeq ($(link),static) ponyc.linkoptions += -static endif ifeq ($(OSTYPE), linux) libponyrt-pic.buildoptions += -fpic libponyrt-pic.buildoptions-ll += -relocation-model=pic endif # Set default PIC for compiling if requested ifdef default_pic ifeq (true,$(default_pic)) libponyrt.buildoptions += -fpic libponyrt.buildoptions-ll += -relocation-model=pic BUILD_FLAGS += -DPONY_DEFAULT_PIC=true else ifneq (false,$(default_pic)) $(error default_pic must be true or false) endif endif endif # target specific disabling of build options libgtest.disable = -Wconversion -Wno-sign-conversion -Wextra libgbenchmark.disable = -Wconversion 
-Wno-sign-conversion libblake2.disable = -Wconversion -Wno-sign-conversion -Wextra # Link relationships. ponyc.links = libponyc libponyrt llvm libblake2 libponyc.tests.links = libgtest libponyc llvm libblake2 libponyc.tests.links.whole = libponyrt libponyrt.tests.links = libgtest libponyrt libponyc.benchmarks.links = libblake2 libgbenchmark libponyc libponyrt llvm libponyrt.benchmarks.links = libgbenchmark libponyrt ifeq ($(OSTYPE),linux) ponyc.links += libpthread libdl libatomic libponyc.tests.links += libpthread libdl libatomic libponyrt.tests.links += libpthread libdl libatomic libponyc.benchmarks.links += libpthread libdl libatomic libponyrt.benchmarks.links += libpthread libdl libatomic endif ifeq ($(OSTYPE),bsd) libponyc.tests.links += libpthread libponyrt.tests.links += $(extra.bsd.libs) libponyc.benchmarks.links += libpthread libponyrt.benchmarks.links += $(extra.bsd.libs) endif ifneq (, $(DTRACE)) $(shell $(DTRACE) -h -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_SOURCE_DIR)/common/dtrace_probes.h) endif # Overwrite the default linker for a target. ponyc.linker = $(CXX) #compile as C but link as CPP (llvm) libponyc.benchmarks.linker = $(CXX) libponyrt.benchmarks.linker = $(CXX) # make targets targets := $(libraries) libponyrt $(binaries) $(tests) $(benchmarks) .PHONY: all $(targets) install uninstall clean stats deploy prerelease check-version test-core test-stdlib-debug test-stdlib test-examples validate-grammar test-ci test-cross-ci benchmark stdlib stdlib-debug all: $(targets) @: # Dependencies libponyc.depends := libponyrt libblake2 libponyc.tests.depends := libponyc libgtest libponyrt.tests.depends := libponyrt libgtest libponyc.benchmarks.depends := libponyc libgbenchmark libponyrt.benchmarks.depends := libponyrt libgbenchmark ponyc.depends := libponyc libponyrt # Generic make section, edit with care. ########################################################################## # # # DIRECTORY: Determines the source dir of a specific target # # # # ENUMERATE: Enumerates input and output files for a specific target # # # # CONFIGURE_COMPILER: Chooses a C or C++ compiler depending on the # # target file. # # # # CONFIGURE_LIBS: Builds a string of libraries to link for a targets # # link dependency. # # # # CONFIGURE_LINKER: Assembles the linker flags required for a target. # # # # EXPAND_COMMAND: Macro that expands to a proper make command for each # # target. 
# # # ########################################################################## define DIRECTORY $(eval sourcedir := ) $(eval outdir := $(obj)/$(1)) ifdef $(1).srcdir sourcedir := $($(1).srcdir) else ifdef $(1).dir sourcedir := $($(1).dir) else ifneq ($$(filter $(1),$(tests)),) sourcedir := $(PONY_TEST_DIR)/$(subst .tests,,$(1)) outdir := $(obj)/tests/$(subst .tests,,$(1)) else ifneq ($$(filter $(1),$(benchmarks)),) sourcedir := $(PONY_BENCHMARK_DIR)/$(subst .benchmarks,,$(1)) outdir := $(obj)/benchmarks/$(subst .benchmarks,,$(1)) else sourcedir := $(PONY_SOURCE_DIR)/$(1) endif endef define ENUMERATE $(eval sourcefiles := ) ifdef $(1).files sourcefiles := $$($(1).files) else sourcefiles := $$(shell find $$(sourcedir) -type f -name "*.c" -or -name\ "*.cc" -or -name "*.ll" | grep -v '.*/\.') endif ifdef $(1).except sourcefiles := $$(filter-out $($(1).except),$$(sourcefiles)) endif endef define CONFIGURE_COMPILER ifeq ($(suffix $(1)),.cc) compiler := $(CXX) flags := $(ALL_CXXFLAGS) $(CXXFLAGS) endif ifeq ($(suffix $(1)),.c) compiler := $(CC) flags := $(ALL_CFLAGS) $(CFLAGS) endif ifeq ($(suffix $(1)),.bc) compiler := $(CC) flags := $(ALL_CFLAGS) $(CFLAGS) endif ifeq ($(suffix $(1)),.ll) compiler := $(CC) flags := $(ALL_CFLAGS) $(CFLAGS) -Wno-override-module endif endef define CONFIGURE_LIBS ifneq (,$$(filter $(1),$(prebuilt))) linkcmd += $($(1).ldflags) libs += $($(1).libs) else libs += $(subst lib,-l,$(1)) endif endef define CONFIGURE_LIBS_WHOLE ifeq ($(OSTYPE),osx) wholelibs += -Wl,-force_load,$(lib)/$(1).a else wholelibs += $(subst lib,-l,$(1)) endif endef define CONFIGURE_LINKER_WHOLE $(eval wholelibs :=) ifneq ($($(1).links.whole),) $(foreach lk,$($(1).links.whole),$(eval $(call CONFIGURE_LIBS_WHOLE,$(lk)))) ifeq ($(OSTYPE),osx) libs += $(wholelibs) else libs += -Wl,--whole-archive $(wholelibs) -Wl,--no-whole-archive endif endif endef define CONFIGURE_LINKER $(eval linkcmd := $(LINKER_FLAGS) -L $(lib)) $(eval linker := $(CC)) $(eval libs :=) ifdef $(1).linker linker := $($(1).linker) else ifneq (,$$(filter .cc,$(suffix $(sourcefiles)))) linker := $(CXX) endif $(eval $(call CONFIGURE_LINKER_WHOLE,$(1))) $(foreach lk,$($(1).links),$(eval $(call CONFIGURE_LIBS,$(lk)))) linkcmd += $(libs) -L $(CROSS_SYSROOT)/usr/local/lib $($(1).linkoptions) endef define PREPARE $(eval $(call DIRECTORY,$(1))) $(eval $(call ENUMERATE,$(1))) $(eval $(call CONFIGURE_LINKER,$(1))) $(eval objectfiles := $(subst $(sourcedir)/,$(outdir)/,$(addsuffix .o,\ $(sourcefiles)))) $(eval bitcodefiles := $(subst .o,.bc,$(objectfiles))) $(eval dependencies := $(subst .c,,$(subst .cc,,$(subst .ll,,$(subst .o,.d,\ $(objectfiles)))))) endef define EXPAND_OBJCMD $(eval file := $(subst .o,,$(1))) $(eval $(call CONFIGURE_COMPILER,$(file))) ifeq ($(3),libponyrtyes) ifneq ($(suffix $(file)),.bc) $(subst .c,,$(subst .cc,,$(subst .ll,,$(1)))): $(subst .c,.bc,$(subst .cc,.bc,$(subst .ll,.bc,$(file)))) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(compiler) $(flags) -c -o $$@ $$< else ifeq ($(suffix $(subst .bc,,$(file))),.ll) $(subst .ll,,$(1)): $(subst $(outdir)/,$(sourcedir)/,$(subst .bc,,$(file))) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(LLVM_AS) -o $$@ $$< else $(subst .c,,$(subst .cc,,$(1))): $(subst $(outdir)/,$(sourcedir)/,$(subst .bc,,$(file))) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(compiler) -MMD -MP $(filter-out $($(2).disable),$(BUILD_FLAGS)) \ $(flags) $($(2).buildoptions) -emit-llvm -c -o $$@ $$< $($(2).include) endif else ifeq ($(suffix $(file)),.ll) $(subst .ll,,$(1)): $(subst 
$(outdir)/,$(sourcedir)/,$(file)) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(LLVM_LLC) $(LL_FLAGS) $($(2).buildoptions-ll) -filetype=obj -o $$@ $$< else $(subst .c,,$(subst .cc,,$(1))): $(subst $(outdir)/,$(sourcedir)/,$(file)) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(compiler) -MMD -MP $(filter-out $($(2).disable),$(BUILD_FLAGS)) \ $(flags) $($(2).buildoptions) -c -o $$@ $$< $($(2).include) endif endef define EXPAND_COMMAND $(eval $(call PREPARE,$(1))) $(eval ofiles := $(subst .c,,$(subst .cc,,$(subst .ll,,$(objectfiles))))) $(eval bcfiles := $(subst .c,,$(subst .cc,,$(subst .ll,,$(bitcodefiles))))) $(eval depends := ) $(foreach d,$($(1).depends),$(eval depends += $($(d))/$(d).$(LIB_EXT))) ifeq ($(1),libponyrt) $($(1))/libponyrt.$(LIB_EXT): $(depends) $(ofiles) @mkdir -p $$(dir $$@) @echo 'Linking libponyrt' ifneq (,$(DTRACE)) ifeq ($(OSTYPE), linux) @echo 'Generating dtrace object file (linux)' $(SILENT)$(DTRACE) -G -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_BUILD_DIR)/dtrace_probes.o $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(PONY_BUILD_DIR)/dtrace_probes.o else ifeq ($(OSTYPE), bsd) @echo 'Generating dtrace object file (bsd)' $(SILENT)rm -f $(PONY_BUILD_DIR)/dtrace_probes.o $(SILENT)$(DTRACE) -G -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_BUILD_DIR)/dtrace_probes.o $(ofiles) $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(PONY_BUILD_DIR)/dtrace_probes.o $(SILENT)$(AR) $(AR_FLAGS) $(lib)/libdtrace_probes.a $(PONY_BUILD_DIR)/dtrace_probes.o else $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) endif else $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) endif ifeq ($(runtime-bitcode),yes) $($(1))/libponyrt.bc: $(depends) $(bcfiles) @mkdir -p $$(dir $$@) @echo 'Generating bitcode for libponyrt' $(SILENT)$(LLVM_LINK) -o $$@ $(bcfiles) ifeq ($(config),release) $(SILENT)$(LLVM_OPT) -O3 -o $$@ $$@ endif libponyrt: $($(1))/libponyrt.bc $($(1))/libponyrt.$(LIB_EXT) else libponyrt: $($(1))/libponyrt.$(LIB_EXT) endif else ifneq ($(filter $(1),$(libraries)),) $($(1))/$(1).$(LIB_EXT): $(depends) $(ofiles) @mkdir -p $$(dir $$@) @echo 'Linking $(1)' $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(1): $($(1))/$(1).$(LIB_EXT) else $($(1))/$(1): $(depends) $(ofiles) @mkdir -p $$(dir $$@) @echo 'Linking $(1)' $(SILENT)$(linker) -o $$@ $(ofiles) $(linkcmd) $(1): $($(1))/$(1) endif $(foreach bcfile,$(bitcodefiles),$(eval $(call EXPAND_OBJCMD,$(bcfile),$(1),$(addsuffix $(runtime-bitcode),$(1))))) $(foreach ofile,$(objectfiles),$(eval $(call EXPAND_OBJCMD,$(ofile),$(1),$(addsuffix $(runtime-bitcode),$(1))))) -include $(dependencies) endef $(foreach target,$(targets),$(eval $(call EXPAND_COMMAND,$(target)))) define EXPAND_INSTALL ifeq ($(OSTYPE),linux) install-libponyrt-pic: libponyrt-pic @mkdir -p $(destdir)/lib/$(arch) $(SILENT)cp $(lib)/libponyrt-pic.a $(DESTDIR)$(ponydir)/lib/$(arch) endif install-libponyrt: libponyrt @mkdir -p $(destdir)/lib/$(arch) $(SILENT)cp $(lib)/libponyrt.a $(DESTDIR)$(ponydir)/lib/$(arch) ifeq ($(OSTYPE),linux) install: libponyc libponyrt libponyrt-pic ponyc else install: libponyc libponyrt ponyc endif @mkdir -p $(DESTDIR)$(ponydir)/bin @mkdir -p $(DESTDIR)$(ponydir)/lib/$(arch) @mkdir -p $(DESTDIR)$(ponydir)/include/pony/detail $(SILENT)cp $(lib)/libponyrt.a $(DESTDIR)$(ponydir)/lib/$(arch) ifeq ($(OSTYPE),linux) $(SILENT)cp $(lib)/libponyrt-pic.a $(DESTDIR)$(ponydir)/lib/$(arch) endif ifneq ($(wildcard $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc),) $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc $(DESTDIR)$(ponydir)/lib/$(arch) endif ifneq 
($(wildcard $(lib)/libdtrace_probes.a),) $(SILENT)cp $(lib)/libdtrace_probes.a $(DESTDIR)$(ponydir)/lib/$(arch) endif $(SILENT)cp $(lib)/libponyc.a $(DESTDIR)$(ponydir)/lib/$(arch) $(SILENT)cp $(bin)/ponyc $(DESTDIR)$(ponydir)/bin $(SILENT)cp src/libponyrt/pony.h $(DESTDIR)$(ponydir)/include $(SILENT)cp src/common/pony/detail/atomics.h $(DESTDIR)$(ponydir)/include/pony/detail $(SILENT)cp -r packages $(DESTDIR)$(ponydir)/ ifeq ($$(symlink),yes) @mkdir -p $(DESTDIR)$(bindir) @mkdir -p $(DESTDIR)$(libdir) @mkdir -p $(DESTDIR)$(includedir)/pony/detail $(SILENT)ln $(symlink.flags) $(ponydir)/bin/ponyc $(DESTDIR)$(bindir)/ponyc $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyrt.a $(DESTDIR)$(libdir)/libponyrt.a ifeq ($(OSTYPE),linux) $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyrt-pic.a $(DESTDIR)$(libdir)/libponyrt-pic.a endif ifneq ($(wildcard $(DESTDIR)$(ponydir)/lib/libponyrt.bc),) $(SILENT)ln $(symlink.flags) $(ponydir)/lib/libponyrt.bc $(DESTDIR)$(libdir)/libponyrt.bc endif ifneq ($(wildcard $(PONY_BUILD_DIR)/libdtrace_probes.a),) $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libdtrace_probes.a $(DESTDIR)$(libdir)/libdtrace_probes.a endif $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyc.a $(DESTDIR)$(libdir)/libponyc.a $(SILENT)ln $(symlink.flags) $(ponydir)/include/pony.h $(DESTDIR)$(includedir)/pony.h $(SILENT)ln $(symlink.flags) $(ponydir)/include/pony/detail/atomics.h $(DESTDIR)$(includedir)/pony/detail/atomics.h endif endef $(eval $(call EXPAND_INSTALL)) define EXPAND_UNINSTALL uninstall: -$(SILENT)rm -rf $(ponydir) 2>/dev/null ||: -$(SILENT)rm $(bindir)/ponyc 2>/dev/null ||: -$(SILENT)rm $(libdir)/libponyrt.a 2>/dev/null ||: ifeq ($(OSTYPE),linux) -$(SILENT)rm $(libdir)/libponyrt-pic.a 2>/dev/null ||: endif ifneq ($(wildcard $(libdir)/libponyrt.bc),) -$(SILENT)rm $(libdir)/libponyrt.bc 2>/dev/null ||: endif ifneq ($(wildcard $(libdir)/libdtrace_probes.a),) -$(SILENT)rm $(libdir)/libdtrace_probes.a 2>/dev/null ||: endif -$(SILENT)rm $(libdir)/libponyc.a 2>/dev/null ||: -$(SILENT)rm $(includedir)/pony.h 2>/dev/null ||: -$(SILENT)rm -r $(includedir)/pony/ 2>/dev/null ||: endef $(eval $(call EXPAND_UNINSTALL)) ifdef verbose bench_verbose = -DCMAKE_VERBOSE_MAKEFILE=true endif ifeq ($(lto),yes) bench_lto = -DBENCHMARK_ENABLE_LTO=true endif benchmark: all $(SILENT)echo "Running libponyc benchmarks..." $(SILENT)$(PONY_BUILD_DIR)/libponyc.benchmarks $(SILENT)echo "Running libponyrt benchmarks..." 
$(SILENT)$(PONY_BUILD_DIR)/libponyrt.benchmarks stdlib-debug: all $(SILENT)PONYPATH=.:$(PONYPATH) $(PONY_BUILD_DIR)/ponyc $(cross_args) -d -s --checktree --verify packages/stdlib stdlib: all $(SILENT)PONYPATH=.:$(PONYPATH) $(PONY_BUILD_DIR)/ponyc $(cross_args) --checktree --verify packages/stdlib test-stdlib-debug: stdlib-debug $(SILENT)$(cross_runner) ./stdlib --sequential $(SILENT)rm stdlib test-stdlib: stdlib $(SILENT)$(cross_runner) ./stdlib --sequential $(SILENT)rm stdlib test-core: all $(SILENT)$(PONY_BUILD_DIR)/libponyc.tests --gtest_shuffle $(SILENT)$(PONY_BUILD_DIR)/libponyrt.tests --gtest_shuffle test: test-core test-stdlib test-examples test-examples: all $(SILENT)PONYPATH=.:$(PONYPATH) find examples/*/* -name '*.pony' -print | xargs -n 1 dirname | sort -u | grep -v ffi- | xargs -n 1 -I {} $(PONY_BUILD_DIR)/ponyc $(cross_args) -d -s --checktree -o {} {} check-version: all $(SILENT)$(PONY_BUILD_DIR)/ponyc --version validate-grammar: all $(SILENT)$(PONY_BUILD_DIR)/ponyc --antlr > pony.g.new $(SILENT)diff pony.g pony.g.new $(SILENT)rm pony.g.new test-ci: all check-version test-core test-stdlib-debug test-stdlib test-examples validate-grammar test-cross-ci: cross_args=--triple=$(cross_triple) --cpu=$(cross_cpu) --link-arch=$(cross_arch) --linker='$(cross_linker)' test-cross-ci: cross_runner=$(QEMU_RUNNER) test-cross-ci: test-ci docs: all $(SILENT)$(PONY_BUILD_DIR)/ponyc packages/stdlib --docs-public --pass expr # Note: linux only define EXPAND_DEPLOY deploy: test docs $(SILENT)bash .bintray.bash debian "$(package_base_version)" "$(package_name)" $(SILENT)bash .bintray.bash rpm "$(package_base_version)" "$(package_name)" $(SILENT)bash .bintray.bash source "$(package_base_version)" "$(package_name)" $(SILENT)rm -rf build/bin @mkdir -p build/bin @mkdir -p $(package)/usr/bin @mkdir -p $(package)/usr/include/pony/detail @mkdir -p $(package)/usr/lib @mkdir -p $(package)/usr/lib/pony/$(package_version)/bin @mkdir -p $(package)/usr/lib/pony/$(package_version)/include/pony/detail @mkdir -p $(package)/usr/lib/pony/$(package_version)/lib $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyc.a $(package)/usr/lib/pony/$(package_version)/lib $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.a $(package)/usr/lib/pony/$(package_version)/lib ifeq ($(OSTYPE),linux) $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt-pic.a $(package)/usr/lib/pony/$(package_version)/lib endif ifneq ($(wildcard $(PONY_BUILD_DIR)/libponyrt.bc),) $(SILENT)cp $(PONY_BUILD_DIR)/libponyrt.bc $(package)/usr/lib/pony/$(package_version)/lib endif ifneq ($(wildcard $(PONY_BUILD_DIR)/libdtrace_probes.a),) $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libdtrace_probes.a $(package)/usr/lib/pony/$(package_version)/lib endif $(SILENT)cp $(PONY_BUILD_DIR)/ponyc $(package)/usr/lib/pony/$(package_version)/bin $(SILENT)cp src/libponyrt/pony.h $(package)/usr/lib/pony/$(package_version)/include $(SILENT)cp src/common/pony/detail/atomics.h $(package)/usr/lib/pony/$(package_version)/include/pony/detail $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt.a $(package)/usr/lib/libponyrt.a ifeq ($(OSTYPE),linux) $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt-pic.a $(package)/usr/lib/libponyrt-pic.a endif ifneq ($(wildcard /usr/lib/pony/$(package_version)/lib/libponyrt.bc),) $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt.bc $(package)/usr/lib/libponyrt.bc endif ifneq ($(wildcard /usr/lib/pony/$(package_version)/lib/libdtrace_probes.a),) $(SILENT)ln -f -s 
/usr/lib/pony/$(package_version)/lib/libdtrace_probes.a $(package)/usr/lib/libdtrace_probes.a endif $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyc.a $(package)/usr/lib/libponyc.a $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/bin/ponyc $(package)/usr/bin/ponyc $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/include/pony.h $(package)/usr/include/pony.h $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/include/pony/detail/atomics.h $(package)/usr/include/pony/detail/atomics.h $(SILENT)cp -r packages $(package)/usr/lib/pony/$(package_version)/ $(SILENT)fpm -s dir -t deb -C $(package) -p build/bin --name $(package_name) --conflicts "ponyc-master" --conflicts "ponyc-release" --version $(package_base_version) --description "The Pony Compiler" --provides "ponyc" --provides "ponyc-release" $(SILENT)fpm -s dir -t rpm -C $(package) -p build/bin --name $(package_name) --conflicts "ponyc-master" --conflicts "ponyc-release" --version $(package_base_version) --description "The Pony Compiler" --provides "ponyc" --provides "ponyc-release" --depends "ponydep-ncurses" $(SILENT)git archive HEAD > build/bin/$(archive) $(SILENT)tar rvf build/bin/$(archive) stdlib-docs $(SILENT)bzip2 build/bin/$(archive) $(SILENT)rm -rf $(package) build/bin/$(archive) endef $(eval $(call EXPAND_DEPLOY)) stats: @echo @echo '------------------------------' @echo 'Compiler and standard library ' @echo '------------------------------' @echo @cloc --read-lang-def=pony.cloc src packages @echo @echo '------------------------------' @echo 'Test suite:' @echo '------------------------------' @echo @cloc --read-lang-def=pony.cloc test clean: @rm -rf $(PONY_BUILD_DIR) @rm -rf $(package) @rm -rf build/bin @rm -rf stdlib-docs @rm -f src/common/dtrace_probes.h -@rmdir build 2>/dev/null ||: @echo 'Repository cleaned ($(PONY_BUILD_DIR)).' help: @echo 'Usage: make [config=name] [options] [use=opt,...] 
[target]' @echo @echo 'CONFIGURATIONS:' @echo ' debug' @echo ' release (default)' @echo @echo 'ARCHITECTURE:' @echo ' native (default)' @echo ' [any compiler supported architecture]' @echo @echo 'options:' @echo ' arch=Name Architecture if Name not specified then host name' @echo ' default_pic=true Make --pic the default' @echo ' link=XXX ponyc linking strategy' @echo ' static (completely static only works with musl)' @echo ' llvm-static (statically link LLVM)' @echo ' llvm-dynamic (dynamically link LLVM)' @echo @echo 'USE OPTIONS:' @echo ' valgrind' @echo ' pooltrack' @echo ' dtrace' @echo ' actor_continuations' @echo ' coverage' @echo ' scheduler_scaling_pthreads' @echo ' thread_sanitizer' @echo ' address_sanitizer' @echo ' undefined_behavior_sanitizer' @echo ' memtrack' @echo ' memtrack_messages' @echo @echo 'TARGETS:' @echo ' libponyc Pony compiler library' @echo ' libponyrt Pony runtime' @echo ' libponyrt-pic Pony runtime -fpic' @echo ' libponyc.tests Test suite for libponyc' @echo ' libponyrt.tests Test suite for libponyrt' @echo ' libponyc.benchmarks Benchmark suite for libponyc' @echo ' libponyrt.benchmarks Benchmark suite for libponyrt' @echo ' ponyc Pony compiler executable' @echo @echo ' all Build all of the above (default)' @echo ' test Run test suite' @echo ' benchmark Build and run benchmark suite' @echo ' install Install ponyc' @echo ' install-libponyrt Install libponyrt only (for cross' @echo ' linking)' @echo ' install-libponyrt-pic Install libponyrt-pic only (for cross' @echo ' linking)' @echo ' uninstall Remove all versions of ponyc' @echo ' stats Print Pony cloc statistics' @echo ' clean Delete all build files' @echo
1
14,039
The change here is to address what exactly?
ponylang-ponyc
c
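For orientation: the ponyc Makefile in this record is driven entirely by per-target variables. A target named NAME contributes NAME.dir (or NAME.srcdir) to locate sources, optional NAME.files and NAME.except to override source enumeration, plus NAME.include, NAME.buildoptions, and NAME.links; the generic DIRECTORY/ENUMERATE/EXPAND_COMMAND macros then turn those into rules. A minimal sketch of adding a new static library under that convention — libfoo and its paths are hypothetical, not part of ponyc:

# hypothetical target following the conventions above
libfoo := $(lib)                     # (1) a name and output directory
libfoo.dir := lib/foo                # where the sources live
libfoo.files := $(libfoo.dir)/foo.c  # (2) explicit source list, skips `find`
libfoo.include := -I src/common/     # include paths, also applied to tests
libraries += libfoo                  # must be appended before `targets` is set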
@@ -101,9 +101,14 @@ const (
 // Options sets options for constructing a *blob.Bucket backed by Azure Block Blob.
 type Options struct {
 	// Credential represents the authorizer for SignedURL.
-	// Required to use SignedURL.
+	// Required to use SignedURL. If you're using MSI for authentication, the
+	// credential will be loaded lazily the first time you call SignedURL.
 	Credential azblob.StorageAccountCredential

+	// IsMSIEnvironment reports whether the blob storage is being authenticated
+	// via a Managed Service Identity.
+	IsMSIEnvironment bool
+
 	// SASToken can be provided along with anonymous credentials to use
 	// delegated privileges.
 	// See https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1#shared-access-signature-parameters.
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package azureblob provides a blob implementation that uses Azure Storage’s // BlockBlob. Use OpenBucket to construct a *blob.Bucket. // // NOTE: SignedURLs for PUT created with this package are not fully portable; // they will not work unless the PUT request includes a "x-ms-blob-type" header // set to "BlockBlob". // See https://stackoverflow.com/questions/37824136/put-on-sas-blob-url-without-specifying-x-ms-blob-type-header. // // URLs // // For blob.OpenBucket, azureblob registers for the scheme "azblob". // The default URL opener will use credentials from the environment variables // AZURE_STORAGE_ACCOUNT, AZURE_STORAGE_KEY, and AZURE_STORAGE_SAS_TOKEN. // AZURE_STORAGE_ACCOUNT is required, along with one of the other two. // AZURE_STORAGE_DOMAIN can optionally be used to provide an Azure Environment // blob storage domain to use. If no AZURE_STORAGE_DOMAIN is provided, the // default Azure public domain "blob.core.windows.net" will be used. Check // the Azure Developer Guide for your particular cloud environment to see // the proper blob storage domain name to provide. // To customize the URL opener, or for more details on the URL format, // see URLOpener. // See https://gocloud.dev/concepts/urls/ for background information. // // Escaping // // Go CDK supports all UTF-8 strings; to make this work with services lacking // full UTF-8 support, strings must be escaped (during writes) and unescaped // (during reads). The following escapes are performed for azureblob: // - Blob keys: ASCII characters 0-31, 92 ("\"), and 127 are escaped to // "__0x<hex>__". Additionally, the "/" in "../" and a trailing "/" in a // key (e.g., "foo/") are escaped in the same way. // - Metadata keys: Per https://docs.microsoft.com/en-us/azure/storage/blobs/storage-properties-metadata, // Azure only allows C# identifiers as metadata keys. Therefore, characters // other than "[a-z][A-z][0-9]_" are escaped using "__0x<hex>__". In addition, // characters "[0-9]" are escaped when they start the string. // URL encoding would not work since "%" is not valid. // - Metadata values: Escaped using URL encoding. 
// // As // // azureblob exposes the following types for As: // - Bucket: *azblob.ContainerURL // - Error: azblob.StorageError // - ListObject: azblob.BlobItemInternal for objects, azblob.BlobPrefix for "directories" // - ListOptions.BeforeList: *azblob.ListBlobsSegmentOptions // - Reader: azblob.DownloadResponse // - Reader.BeforeRead: *azblob.BlockBlobURL, *azblob.BlobAccessConditions // - Attributes: azblob.BlobGetPropertiesResponse // - CopyOptions.BeforeCopy: azblob.Metadata, *azblob.ModifiedAccessConditions, *azblob.BlobAccessConditions // - WriterOptions.BeforeWrite: *azblob.UploadStreamToBlockBlobOptions // - SignedURLOptions.BeforeSign: *azblob.BlobSASSignatureValues package azureblob import ( "context" "errors" "fmt" "io" "net/http" "net/url" "os" "sort" "strconv" "strings" "sync" "time" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/google/uuid" "github.com/google/wire" "gocloud.dev/blob" "gocloud.dev/blob/driver" "gocloud.dev/gcerrors" "gocloud.dev/internal/escape" "gocloud.dev/internal/gcerr" "gocloud.dev/internal/useragent" ) const ( tokenRefreshTolerance = 300 ) // Options sets options for constructing a *blob.Bucket backed by Azure Block Blob. type Options struct { // Credential represents the authorizer for SignedURL. // Required to use SignedURL. Credential azblob.StorageAccountCredential // SASToken can be provided along with anonymous credentials to use // delegated privileges. // See https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1#shared-access-signature-parameters. SASToken SASToken // StorageDomain can be provided to specify an Azure Cloud Environment // domain to target for the blob storage account (i.e. public, government, china). // The default value is "blob.core.windows.net". Possible values will look similar // to this but are different for each cloud (i.e. "blob.core.govcloudapi.net" for USGovernment). // Check the Azure developer guide for the cloud environment where your bucket resides. StorageDomain StorageDomain // Protocol can be provided to specify protocol to access Azure Blob Storage. // Protocols that can be specified are "http" for local emulator and "https" for general. // If blank is specified, "https" will be used. Protocol Protocol } const ( defaultMaxDownloadRetryRequests = 3 // download retry policy (Azure default is zero) defaultPageSize = 1000 // default page size for ListPaged (Azure default is 5000) defaultUploadBuffers = 5 // configure the number of rotating buffers that are used when uploading (for degree of parallelism) defaultUploadBlockSize = 8 * 1024 * 1024 // configure the upload buffer size ) func init() { blob.DefaultURLMux().RegisterBucket(Scheme, new(lazyCredsOpener)) } // Set holds Wire providers for this package. var Set = wire.NewSet( NewPipeline, wire.Struct(new(Options), "Credential", "SASToken"), wire.Struct(new(URLOpener), "AccountName", "Pipeline", "Options"), ) // lazyCredsOpener obtains credentials from the environment on the first call // to OpenBucketURL. type lazyCredsOpener struct { init sync.Once opener *URLOpener err error } func (o *lazyCredsOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) { o.init.Do(func() { // Use default credential info from the environment. // Ignore errors, as we'll get errors from OpenBucket later. 
accountName, _ := DefaultAccountName() accountKey, _ := DefaultAccountKey() sasToken, _ := DefaultSASToken() storageDomain, _ := DefaultStorageDomain() protocol, _ := DefaultProtocol() isMSIEnvironment := adal.MSIAvailable(ctx, adal.CreateSender()) if accountKey != "" { o.opener, o.err = openerFromEnv(accountName, accountKey, sasToken, storageDomain, protocol) } else if isMSIEnvironment { o.opener, o.err = openerFromMSI(accountName, storageDomain, protocol) } else { o.opener, o.err = openerFromAnon(accountName, storageDomain, protocol) } }) if o.err != nil { return nil, fmt.Errorf("open bucket %v: %v", u, o.err) } return o.opener.OpenBucketURL(ctx, u) } // Scheme is the URL scheme gcsblob registers its URLOpener under on // blob.DefaultMux. const Scheme = "azblob" // URLOpener opens Azure URLs like "azblob://mybucket". // // The URL host is used as the bucket name. // // The following query options are supported: // - domain: The domain name used to access the Azure Blob storage (e.g. blob.core.windows.net) type URLOpener struct { // AccountName must be specified. AccountName AccountName // Pipeline must be set to a non-nil value. Pipeline pipeline.Pipeline // Options specifies the options to pass to OpenBucket. Options Options } func openerFromEnv(accountName AccountName, accountKey AccountKey, sasToken SASToken, storageDomain StorageDomain, protocol Protocol) (*URLOpener, error) { // azblob.Credential is an interface; we will use either a SharedKeyCredential // or anonymous credentials. If the former, we will also fill in // Options.Credential so that SignedURL will work. var credential azblob.Credential var storageAccountCredential azblob.StorageAccountCredential if accountKey != "" { sharedKeyCred, err := NewCredential(accountName, accountKey) if err != nil { return nil, fmt.Errorf("invalid credentials %s/%s: %v", accountName, accountKey, err) } credential = sharedKeyCred storageAccountCredential = sharedKeyCred } else { credential = azblob.NewAnonymousCredential() } return &URLOpener{ AccountName: accountName, Pipeline: NewPipeline(credential, azblob.PipelineOptions{}), Options: Options{ Credential: storageAccountCredential, SASToken: sasToken, StorageDomain: storageDomain, Protocol: protocol, }, }, nil } // openerFromAnon creates an anonymous credential backend URLOpener func openerFromAnon(accountName AccountName, storageDomain StorageDomain, protocol Protocol) (*URLOpener, error) { return &URLOpener{ AccountName: accountName, Pipeline: NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}), Options: Options{ StorageDomain: storageDomain, Protocol: protocol, }, }, nil } var defaultTokenRefreshFunction = func(spToken *adal.ServicePrincipalToken) func(credential azblob.TokenCredential) time.Duration { return func(credential azblob.TokenCredential) time.Duration { err := spToken.Refresh() if err != nil { return 0 } expiresIn, err := strconv.ParseInt(string(spToken.Token().ExpiresIn), 10, 64) if err != nil { return 0 } credential.SetToken(spToken.Token().AccessToken) return time.Duration(expiresIn-tokenRefreshTolerance) * time.Second } } // openerFromMSI acquires an MSI token and returns TokenCredential backed URLOpener func openerFromMSI(accountName AccountName, storageDomain StorageDomain, protocol Protocol) (*URLOpener, error) { spToken, err := getMSIServicePrincipalToken(azure.PublicCloud.ResourceIdentifiers.Storage) if err != nil { return nil, fmt.Errorf("failure acquiring token from MSI endpoint %w", err) } err = spToken.Refresh() if err != nil { return nil, 
fmt.Errorf("failure refreshing token from MSI endpoint %w", err) } credential := azblob.NewTokenCredential(spToken.Token().AccessToken, defaultTokenRefreshFunction(spToken)) return &URLOpener{ AccountName: accountName, Pipeline: NewPipeline(credential, azblob.PipelineOptions{}), Options: Options{ StorageDomain: storageDomain, Protocol: protocol, }, }, nil } // getMSIServicePrincipalToken retrieves Azure API Service Principal token. func getMSIServicePrincipalToken(resource string) (*adal.ServicePrincipalToken, error) { msiEndpoint, err := adal.GetMSIEndpoint() if err != nil { return nil, fmt.Errorf("failed to get the managed service identity endpoint: %v", err) } token, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, resource) if err != nil { return nil, fmt.Errorf("failed to create the managed service identity token: %v", err) } return token, nil } // OpenBucketURL opens a blob.Bucket based on u. func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) { opts := new(Options) *opts = o.Options err := setOptionsFromURLParams(u.Query(), opts) if err != nil { return nil, err } return OpenBucket(ctx, o.Pipeline, o.AccountName, u.Host, opts) } func setOptionsFromURLParams(q url.Values, o *Options) error { for param, values := range q { if len(values) > 1 { return fmt.Errorf("multiple values of %v not allowed", param) } value := values[0] switch param { case "domain": o.StorageDomain = StorageDomain(value) default: return fmt.Errorf("unknown query parameter %q", param) } } return nil } // DefaultIdentity is a Wire provider set that provides an Azure storage // account name, key, and SharedKeyCredential from environment variables. var DefaultIdentity = wire.NewSet( DefaultAccountName, DefaultAccountKey, NewCredential, wire.Bind(new(azblob.Credential), new(*azblob.SharedKeyCredential)), wire.Value(azblob.PipelineOptions{}), ) // SASTokenIdentity is a Wire provider set that provides an Azure storage // account name, SASToken, and anonymous credential from environment variables. var SASTokenIdentity = wire.NewSet( DefaultAccountName, DefaultSASToken, azblob.NewAnonymousCredential, wire.Value(azblob.PipelineOptions{}), ) // AccountName is an Azure storage account name. type AccountName string // AccountKey is an Azure storage account key (primary or secondary). type AccountKey string // SASToken is an Azure shared access signature. // https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1 type SASToken string // StorageDomain is an Azure Cloud Environment domain name to target // (i.e. blob.core.windows.net, blob.core.govcloudapi.net, blob.core.chinacloudapi.cn). // It is read from the AZURE_STORAGE_DOMAIN environment variable. type StorageDomain string // Protocol is an protocol to access Azure Blob Storage. // It must be "http" or "https". // It is read from the AZURE_STORAGE_PROTOCOL environment variable. type Protocol string // DefaultAccountName loads the Azure storage account name from the // AZURE_STORAGE_ACCOUNT environment variable. func DefaultAccountName() (AccountName, error) { s := os.Getenv("AZURE_STORAGE_ACCOUNT") if s == "" { return "", errors.New("azureblob: environment variable AZURE_STORAGE_ACCOUNT not set") } return AccountName(s), nil } // DefaultAccountKey loads the Azure storage account key (primary or secondary) // from the AZURE_STORAGE_KEY environment variable. 
func DefaultAccountKey() (AccountKey, error) { s := os.Getenv("AZURE_STORAGE_KEY") if s == "" { return "", errors.New("azureblob: environment variable AZURE_STORAGE_KEY not set") } return AccountKey(s), nil } // DefaultSASToken loads a Azure SAS token from the AZURE_STORAGE_SAS_TOKEN // environment variable. func DefaultSASToken() (SASToken, error) { s := os.Getenv("AZURE_STORAGE_SAS_TOKEN") if s == "" { return "", errors.New("azureblob: environment variable AZURE_STORAGE_SAS_TOKEN not set") } return SASToken(s), nil } // DefaultStorageDomain loads the desired Azure Cloud to target from // the AZURE_STORAGE_DOMAIN environment variable. func DefaultStorageDomain() (StorageDomain, error) { s := os.Getenv("AZURE_STORAGE_DOMAIN") return StorageDomain(s), nil } // DefaultProtocol loads the protocol to access Azure Blob Storage from the // AZURE_STORAGE_PROTOCOL environment variable. func DefaultProtocol() (Protocol, error) { s := os.Getenv("AZURE_STORAGE_PROTOCOL") return Protocol(s), nil } // NewCredential creates a SharedKeyCredential. func NewCredential(accountName AccountName, accountKey AccountKey) (*azblob.SharedKeyCredential, error) { return azblob.NewSharedKeyCredential(string(accountName), string(accountKey)) } // NewPipeline creates a Pipeline for making HTTP requests to Azure. func NewPipeline(credential azblob.Credential, opts azblob.PipelineOptions) pipeline.Pipeline { opts.Telemetry.Value = useragent.AzureUserAgentPrefix("blob") + opts.Telemetry.Value return azblob.NewPipeline(credential, opts) } // bucket represents a Azure Storage Account Container, which handles read, // write and delete operations on objects within it. // See https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction. type bucket struct { name string pageMarkers map[string]azblob.Marker serviceURL *azblob.ServiceURL containerURL azblob.ContainerURL opts *Options } // OpenBucket returns a *blob.Bucket backed by Azure Storage Account. See the package // documentation for an example and // https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob // for more details. func OpenBucket(ctx context.Context, pipeline pipeline.Pipeline, accountName AccountName, containerName string, opts *Options) (*blob.Bucket, error) { b, err := openBucket(ctx, pipeline, accountName, containerName, opts) if err != nil { return nil, err } return blob.NewBucket(b), nil } func openBucket(ctx context.Context, pipeline pipeline.Pipeline, accountName AccountName, containerName string, opts *Options) (*bucket, error) { if pipeline == nil { return nil, errors.New("azureblob.OpenBucket: pipeline is required") } if accountName == "" { return nil, errors.New("azureblob.OpenBucket: accountName is required") } if containerName == "" { return nil, errors.New("azureblob.OpenBucket: containerName is required") } if opts == nil { opts = &Options{} } if opts.StorageDomain == "" { // If opts.StorageDomain is missing, use default domain. opts.StorageDomain = "blob.core.windows.net" } switch opts.Protocol { case "": // If opts.Protocol is missing, use "https". opts.Protocol = "https" case "https", "http": default: return nil, errors.New("azureblob.OpenBucket: protocol must be http or https") } d := string(opts.StorageDomain) var u string // The URL structure of the local emulator is a bit different from the real one. 
if strings.HasPrefix(d, "127.0.0.1") || strings.HasPrefix(d, "localhost") { u = fmt.Sprintf("%s://%s/%s", opts.Protocol, opts.StorageDomain, accountName) // http://127.0.0.1:10000/devstoreaccount1 } else { u = fmt.Sprintf("%s://%s.%s", opts.Protocol, accountName, opts.StorageDomain) // https://myaccount.blob.core.windows.net } blobURL, err := url.Parse(u) if err != nil { return nil, err } if opts.SASToken != "" { // The Azure portal includes a leading "?" for the SASToken, which we // don't want here. blobURL.RawQuery = strings.TrimPrefix(string(opts.SASToken), "?") } serviceURL := azblob.NewServiceURL(*blobURL, pipeline) return &bucket{ name: containerName, pageMarkers: map[string]azblob.Marker{}, serviceURL: &serviceURL, containerURL: serviceURL.NewContainerURL(containerName), opts: opts, }, nil } // Close implements driver.Close. func (b *bucket) Close() error { return nil } // Copy implements driver.Copy. func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error { dstKey = escapeKey(dstKey, false) dstBlobURL := b.containerURL.NewBlobURL(dstKey) srcKey = escapeKey(srcKey, false) srcURL := b.containerURL.NewBlobURL(srcKey).URL() md := azblob.Metadata{} mac := azblob.ModifiedAccessConditions{} bac := azblob.BlobAccessConditions{} at := azblob.AccessTierNone btm := azblob.BlobTagsMap{} if opts.BeforeCopy != nil { asFunc := func(i interface{}) bool { switch v := i.(type) { case *azblob.Metadata: *v = md return true case **azblob.ModifiedAccessConditions: *v = &mac return true case **azblob.BlobAccessConditions: *v = &bac return true } return false } if err := opts.BeforeCopy(asFunc); err != nil { return err } } resp, err := dstBlobURL.StartCopyFromURL(ctx, srcURL, md, mac, bac, at, btm) if err != nil { return err } copyStatus := resp.CopyStatus() nErrors := 0 for copyStatus == azblob.CopyStatusPending { // Poll until the copy is complete. time.Sleep(500 * time.Millisecond) propertiesResp, err := dstBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{}) if err != nil { // A GetProperties failure may be transient, so allow a couple // of them before giving up. nErrors++ if ctx.Err() != nil || nErrors == 3 { return err } } copyStatus = propertiesResp.CopyStatus() } if copyStatus != azblob.CopyStatusSuccess { return fmt.Errorf("Copy failed with status: %s", copyStatus) } return nil } // Delete implements driver.Delete. func (b *bucket) Delete(ctx context.Context, key string) error { key = escapeKey(key, false) blockBlobURL := b.containerURL.NewBlockBlobURL(key) _, err := blockBlobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}) return err } // reader reads an azblob. It implements io.ReadCloser. type reader struct { body io.ReadCloser attrs driver.ReaderAttributes raw *azblob.DownloadResponse } func (r *reader) Read(p []byte) (int, error) { return r.body.Read(p) } func (r *reader) Close() error { return r.body.Close() } func (r *reader) Attributes() *driver.ReaderAttributes { return &r.attrs } func (r *reader) As(i interface{}) bool { p, ok := i.(*azblob.DownloadResponse) if !ok { return false } *p = *r.raw return true } // NewRangeReader implements driver.NewRangeReader. 
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) { key = escapeKey(key, false) blockBlobURL := b.containerURL.NewBlockBlobURL(key) blockBlobURLp := &blockBlobURL accessConditions := &azblob.BlobAccessConditions{} end := length if end < 0 { end = azblob.CountToEnd } if opts.BeforeRead != nil { asFunc := func(i interface{}) bool { if p, ok := i.(**azblob.BlockBlobURL); ok { *p = blockBlobURLp return true } if p, ok := i.(**azblob.BlobAccessConditions); ok { *p = accessConditions return true } return false } if err := opts.BeforeRead(asFunc); err != nil { return nil, err } } blobDownloadResponse, err := blockBlobURLp.Download(ctx, offset, end, *accessConditions, false) if err != nil { return nil, err } attrs := driver.ReaderAttributes{ ContentType: blobDownloadResponse.ContentType(), Size: getSize(blobDownloadResponse.ContentLength(), blobDownloadResponse.ContentRange()), ModTime: blobDownloadResponse.LastModified(), } var body io.ReadCloser if length == 0 { body = http.NoBody } else { body = blobDownloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: defaultMaxDownloadRetryRequests}) } return &reader{ body: body, attrs: attrs, raw: blobDownloadResponse, }, nil } func getSize(contentLength int64, contentRange string) int64 { // Default size to ContentLength, but that's incorrect for partial-length reads, // where ContentLength refers to the size of the returned Body, not the entire // size of the blob. ContentRange has the full size. size := contentLength if contentRange != "" { // Sample: bytes 10-14/27 (where 27 is the full size). parts := strings.Split(contentRange, "/") if len(parts) == 2 { if i, err := strconv.ParseInt(parts[1], 10, 64); err == nil { size = i } } } return size } // As implements driver.As. func (b *bucket) As(i interface{}) bool { p, ok := i.(**azblob.ContainerURL) if !ok { return false } *p = &b.containerURL return true } // As implements driver.ErrorAs. func (b *bucket) ErrorAs(err error, i interface{}) bool { switch v := err.(type) { case azblob.StorageError: if p, ok := i.(*azblob.StorageError); ok { *p = v return true } } return false } func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode { serr, ok := err.(azblob.StorageError) switch { case !ok: // This happens with an invalid storage account name; the host // is something like invalidstorageaccount.blob.core.windows.net. if strings.Contains(err.Error(), "no such host") { return gcerrors.NotFound } return gcerrors.Unknown case serr.ServiceCode() == azblob.ServiceCodeBlobNotFound || serr.Response().StatusCode == 404: // Check and fail both the SDK ServiceCode and the Http Response Code for NotFound return gcerrors.NotFound case serr.ServiceCode() == azblob.ServiceCodeAuthenticationFailed: return gcerrors.PermissionDenied default: return gcerrors.Unknown } } // Attributes implements driver.Attributes. func (b *bucket) Attributes(ctx context.Context, key string) (*driver.Attributes, error) { key = escapeKey(key, false) blockBlobURL := b.containerURL.NewBlockBlobURL(key) blobPropertiesResponse, err := blockBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{}) if err != nil { return nil, err } azureMD := blobPropertiesResponse.NewMetadata() md := make(map[string]string, len(azureMD)) for k, v := range azureMD { // See the package comments for more details on escaping of metadata // keys & values. 
md[escape.HexUnescape(k)] = escape.URLUnescape(v) } return &driver.Attributes{ CacheControl: blobPropertiesResponse.CacheControl(), ContentDisposition: blobPropertiesResponse.ContentDisposition(), ContentEncoding: blobPropertiesResponse.ContentEncoding(), ContentLanguage: blobPropertiesResponse.ContentLanguage(), ContentType: blobPropertiesResponse.ContentType(), Size: blobPropertiesResponse.ContentLength(), CreateTime: blobPropertiesResponse.CreationTime(), ModTime: blobPropertiesResponse.LastModified(), MD5: blobPropertiesResponse.ContentMD5(), ETag: fmt.Sprintf("%v", blobPropertiesResponse.ETag()), Metadata: md, AsFunc: func(i interface{}) bool { p, ok := i.(*azblob.BlobGetPropertiesResponse) if !ok { return false } *p = *blobPropertiesResponse return true }, }, nil } // ListPaged implements driver.ListPaged. func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) { pageSize := opts.PageSize if pageSize == 0 { pageSize = defaultPageSize } marker := azblob.Marker{} if len(opts.PageToken) > 0 { if m, ok := b.pageMarkers[string(opts.PageToken)]; ok { marker = m } } azOpts := azblob.ListBlobsSegmentOptions{ MaxResults: int32(pageSize), Prefix: escapeKey(opts.Prefix, true), } if opts.BeforeList != nil { asFunc := func(i interface{}) bool { p, ok := i.(**azblob.ListBlobsSegmentOptions) if !ok { return false } *p = &azOpts return true } if err := opts.BeforeList(asFunc); err != nil { return nil, err } } listBlob, err := b.containerURL.ListBlobsHierarchySegment(ctx, marker, escapeKey(opts.Delimiter, true), azOpts) if err != nil { return nil, err } page := &driver.ListPage{} page.Objects = []*driver.ListObject{} for _, blobPrefix := range listBlob.Segment.BlobPrefixes { page.Objects = append(page.Objects, &driver.ListObject{ Key: unescapeKey(blobPrefix.Name), Size: 0, IsDir: true, AsFunc: func(i interface{}) bool { p, ok := i.(*azblob.BlobPrefix) if !ok { return false } *p = blobPrefix return true }}) } for _, blobInfo := range listBlob.Segment.BlobItems { page.Objects = append(page.Objects, &driver.ListObject{ Key: unescapeKey(blobInfo.Name), ModTime: blobInfo.Properties.LastModified, Size: *blobInfo.Properties.ContentLength, MD5: blobInfo.Properties.ContentMD5, IsDir: false, AsFunc: func(i interface{}) bool { p, ok := i.(*azblob.BlobItemInternal) if !ok { return false } *p = blobInfo return true }, }) } if listBlob.NextMarker.NotDone() { token := uuid.New().String() b.pageMarkers[token] = listBlob.NextMarker page.NextPageToken = []byte(token) } if len(listBlob.Segment.BlobPrefixes) > 0 && len(listBlob.Segment.BlobItems) > 0 { sort.Slice(page.Objects, func(i, j int) bool { return page.Objects[i].Key < page.Objects[j].Key }) } return page, nil } // SignedURL implements driver.SignedURL. 
func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) { if b.opts.Credential == nil { return "", gcerr.New(gcerr.Unimplemented, nil, 1, "azureblob: to use SignedURL, you must call OpenBucket with a non-nil Options.Credential") } if opts.ContentType != "" || opts.EnforceAbsentContentType { return "", gcerr.New(gcerr.Unimplemented, nil, 1, "azureblob: does not enforce Content-Type on PUT") } key = escapeKey(key, false) blockBlobURL := b.containerURL.NewBlockBlobURL(key) srcBlobParts := azblob.NewBlobURLParts(blockBlobURL.URL()) perms := azblob.BlobSASPermissions{} switch opts.Method { case http.MethodGet: perms.Read = true case http.MethodPut: perms.Create = true perms.Write = true case http.MethodDelete: perms.Delete = true default: return "", fmt.Errorf("unsupported Method %s", opts.Method) } signVals := &azblob.BlobSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, ExpiryTime: time.Now().UTC().Add(opts.Expiry), ContainerName: b.name, BlobName: srcBlobParts.BlobName, Permissions: perms.String(), } if opts.BeforeSign != nil { asFunc := func(i interface{}) bool { v, ok := i.(**azblob.BlobSASSignatureValues) if ok { *v = signVals } return ok } if err := opts.BeforeSign(asFunc); err != nil { return "", err } } var err error if srcBlobParts.SAS, err = signVals.NewSASQueryParameters(b.opts.Credential); err != nil { return "", err } srcBlobURLWithSAS := srcBlobParts.URL() return srcBlobURLWithSAS.String(), nil } type writer struct { ctx context.Context blockBlobURL *azblob.BlockBlobURL uploadOpts *azblob.UploadStreamToBlockBlobOptions w *io.PipeWriter donec chan struct{} err error } // escapeKey does all required escaping for UTF-8 strings to work with Azure. // isPrefix indicates whether the key is a full key, or a prefix/delimiter. func escapeKey(key string, isPrefix bool) string { return escape.HexEscape(key, func(r []rune, i int) bool { c := r[i] switch { // Azure does not work well with backslashes in blob names. case c == '\\': return true // Azure doesn't handle these characters (determined via experimentation). case c < 32 || c == 127: return true // Escape trailing "/" for full keys, otherwise Azure can't address them // consistently. case !isPrefix && i == len(key)-1 && c == '/': return true // For "../", escape the trailing slash. case i > 1 && r[i] == '/' && r[i-1] == '.' && r[i-2] == '.': return true } return false }) } // unescapeKey reverses escapeKey. func unescapeKey(key string) string { return escape.HexUnescape(key) } // NewTypedWriter implements driver.NewTypedWriter. func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) { key = escapeKey(key, false) blockBlobURL := b.containerURL.NewBlockBlobURL(key) if opts.BufferSize == 0 { opts.BufferSize = defaultUploadBlockSize } md := make(map[string]string, len(opts.Metadata)) for k, v := range opts.Metadata { // See the package comments for more details on escaping of metadata // keys & values. 
e := escape.HexEscape(k, func(runes []rune, i int) bool { c := runes[i] switch { case i == 0 && c >= '0' && c <= '9': return true case escape.IsASCIIAlphanumeric(c): return false case c == '_': return false } return true }) if _, ok := md[e]; ok { return nil, fmt.Errorf("duplicate keys after escaping: %q => %q", k, e) } md[e] = escape.URLEscape(v) } uploadOpts := &azblob.UploadStreamToBlockBlobOptions{ BufferSize: opts.BufferSize, MaxBuffers: defaultUploadBuffers, Metadata: md, BlobHTTPHeaders: azblob.BlobHTTPHeaders{ CacheControl: opts.CacheControl, ContentDisposition: opts.ContentDisposition, ContentEncoding: opts.ContentEncoding, ContentLanguage: opts.ContentLanguage, ContentMD5: opts.ContentMD5, ContentType: contentType, }, } if opts.BeforeWrite != nil { asFunc := func(i interface{}) bool { p, ok := i.(**azblob.UploadStreamToBlockBlobOptions) if !ok { return false } *p = uploadOpts return true } if err := opts.BeforeWrite(asFunc); err != nil { return nil, err } } return &writer{ ctx: ctx, blockBlobURL: &blockBlobURL, uploadOpts: uploadOpts, donec: make(chan struct{}), }, nil } // Write appends p to w. User must call Close to close the w after done writing. func (w *writer) Write(p []byte) (int, error) { if len(p) == 0 { return 0, nil } if w.w == nil { pr, pw := io.Pipe() w.w = pw if err := w.open(pr); err != nil { return 0, err } } return w.w.Write(p) } func (w *writer) open(pr *io.PipeReader) error { go func() { defer close(w.donec) var body io.Reader if pr == nil { body = http.NoBody } else { body = pr } _, w.err = azblob.UploadStreamToBlockBlob(w.ctx, body, *w.blockBlobURL, *w.uploadOpts) if w.err != nil { if pr != nil { pr.CloseWithError(w.err) } return } }() return nil } // Close completes the writer and closes it. Any error occurring during write will // be returned. If a writer is closed before any Write is called, Close will // create an empty file at the given key. func (w *writer) Close() error { if w.w == nil { w.open(nil) } else if err := w.w.Close(); err != nil { return err } <-w.donec return w.err }
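Since the escaping rules in the package comment are the part most readers trip over, here is a toy, self-contained re-implementation of the blob-key rule for illustration only. The real logic is escapeKey above plus the internal gocloud.dev/internal/escape package; hexEscapeKey is a made-up name, and the real escapeKey additionally handles the "/" in "../".

package main

import (
	"fmt"
	"strings"
)

// hexEscapeKey mimics the documented blob-key escaping: ASCII 0-31,
// 92 ('\'), and 127 become "__0x<hex>__", and a trailing '/' is escaped
// the same way.
func hexEscapeKey(key string) string {
	var b strings.Builder
	for i := 0; i < len(key); i++ {
		c := key[i]
		trailingSlash := c == '/' && i == len(key)-1
		if c < 32 || c == '\\' || c == 127 || trailingSlash {
			fmt.Fprintf(&b, "__0x%x__", c)
		} else {
			b.WriteByte(c)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(hexEscapeKey("foo\\bar/")) // foo__0x5c__bar__0x2f__
}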
1
20,271
Is this really an Option? IIUC, you compute it automatically for the URLOpener case, so why would we require a user to fill it in when using the constructor? I.e., can't we drop this and use `adal.MSIAvailable` instead? If that call is expensive, cache it on `bucket`, not `Options`.
google-go-cloud
go
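The reviewer's suggestion — drop the IsMSIEnvironment option and derive the answer where it is needed — could look roughly like the sketch below, reusing the adal.MSIAvailable call the URL opener already makes. The msiOnce/msiAvailable fields and the isMSI helper are hypothetical names, not the fix that actually landed.

package azureblob

import (
	"context"
	"sync"

	"github.com/Azure/go-autorest/autorest/adal"
)

type bucket struct {
	// ... existing fields elided ...
	msiOnce      sync.Once
	msiAvailable bool
}

// isMSI reports whether an MSI endpoint is available, computing the answer
// once and caching it on the bucket instead of exposing it as an Option.
func (b *bucket) isMSI(ctx context.Context) bool {
	b.msiOnce.Do(func() {
		b.msiAvailable = adal.MSIAvailable(ctx, adal.CreateSender())
	})
	return b.msiAvailable
}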
@@ -23,7 +23,7 @@ ExplicitBitVect::ExplicitBitVect(unsigned int size, bool bitsSet) {
   d_size = 0;
-  dp_bits = 0;
+  dp_bits = nullptr;
   d_numOnBits = 0;
   _initForSize(size);
   if (bitsSet) {
1
// $Id$
//
//  Copyright (c) 2001-2008 greg Landrum and Rational Discovery LLC
//  Copyright (c) 2014, Novartis Institutes for BioMedical Research Inc.
//
//   @@ All Rights Reserved @@
//  This file is part of the RDKit.
//  The contents are covered by the terms of the BSD license
//  which is included in the file license.txt, found at the root
//  of the RDKit source tree.
//
#include <iostream>
#include <RDGeneral/Exceptions.h>
#include "ExplicitBitVect.h"
#include <RDGeneral/StreamOps.h>
#include "base64.h"
#include <sstream>
#include <limits>
#ifdef WIN32
#include <ios>
#endif
#include <boost/cstdint.hpp>

ExplicitBitVect::ExplicitBitVect(unsigned int size, bool bitsSet) {
  d_size = 0;
  dp_bits = 0;
  d_numOnBits = 0;
  _initForSize(size);
  if (bitsSet) {
    dp_bits->set();  // set all bits to 1
    d_numOnBits = size;
  }
}

ExplicitBitVect::ExplicitBitVect(const std::string &s) {
  d_size = 0;
  dp_bits = 0;
  d_numOnBits = 0;
  initFromText(s.c_str(), s.length());
}

ExplicitBitVect::ExplicitBitVect(const char *data, const unsigned int dataLen) {
  d_size = 0;
  dp_bits = 0;
  d_numOnBits = 0;
  initFromText(data, dataLen);
}

ExplicitBitVect::ExplicitBitVect(const ExplicitBitVect &other) : BitVect(other) {
  d_size = other.d_size;
  dp_bits = new boost::dynamic_bitset<>(*(other.dp_bits));
  d_numOnBits = other.d_numOnBits;
};

ExplicitBitVect &ExplicitBitVect::operator=(const ExplicitBitVect &other) {
  d_size = other.d_size;
  delete dp_bits;
  dp_bits = new boost::dynamic_bitset<>(*(other.dp_bits));
  d_numOnBits = other.d_numOnBits;
  return *this;
};

bool ExplicitBitVect::operator[](const unsigned int which) const {
  if (which >= d_size) {
    throw IndexErrorException(which);
  }
  return (bool)(*dp_bits)[which];
};

bool ExplicitBitVect::setBit(const unsigned int which) {
  if (which >= d_size) {
    throw IndexErrorException(which);
  }
  if ((bool)(*dp_bits)[which]) {
    return true;
  } else {
    (*dp_bits)[which] = 1;
    ++d_numOnBits;
    return false;
  }
};

bool ExplicitBitVect::unsetBit(const unsigned int which) {
  if (which >= d_size) {
    throw IndexErrorException(which);
  }
  if ((bool)(*dp_bits)[which]) {
    (*dp_bits)[which] = 0;
    --d_numOnBits;
    return true;
  } else {
    return false;
  }
};

bool ExplicitBitVect::getBit(const unsigned int which) const {
  if (which >= d_size) {
    throw IndexErrorException(which);
  }
  return ((bool)(*dp_bits)[which]);
};

ExplicitBitVect ExplicitBitVect::operator^(const ExplicitBitVect &other) const {
  ExplicitBitVect ans(d_size);
  *(ans.dp_bits) = (*dp_bits) ^ *(other.dp_bits);
  ans.d_numOnBits = ans.dp_bits->count();
  return (ans);
};

ExplicitBitVect ExplicitBitVect::operator&(const ExplicitBitVect &other) const {
  ExplicitBitVect ans(d_size);
  *(ans.dp_bits) = (*dp_bits) & *(other.dp_bits);
  ans.d_numOnBits = ans.dp_bits->count();
  return (ans);
};

ExplicitBitVect ExplicitBitVect::operator|(const ExplicitBitVect &other) const {
  ExplicitBitVect ans(d_size);
  *(ans.dp_bits) = (*dp_bits) | *(other.dp_bits);
  ans.d_numOnBits = ans.dp_bits->count();
  return (ans);
};

ExplicitBitVect &ExplicitBitVect::operator^=(const ExplicitBitVect &other) {
  *(dp_bits) ^= *(other.dp_bits);
  d_numOnBits = dp_bits->count();
  return *this;
};

ExplicitBitVect &ExplicitBitVect::operator&=(const ExplicitBitVect &other) {
  *(dp_bits) &= *(other.dp_bits);
  d_numOnBits = dp_bits->count();
  return *this;
};

ExplicitBitVect &ExplicitBitVect::operator|=(const ExplicitBitVect &other) {
  *(dp_bits) |= *(other.dp_bits);
  d_numOnBits = dp_bits->count();
  return *this;
};

ExplicitBitVect ExplicitBitVect::operator~() const {
  ExplicitBitVect ans(d_size);
  *(ans.dp_bits) = ~(*dp_bits);
  ans.d_numOnBits = ans.dp_bits->count();
  return (ans);
};

ExplicitBitVect &ExplicitBitVect::operator+=(const ExplicitBitVect &other) {
  dp_bits->resize(d_size + other.d_size);
  unsigned int original_size = d_size;
  d_size = dp_bits->size();
  for (unsigned i = 0; i < other.d_size; i++) {
    if (other[i]) {
      setBit(i + original_size);
    }
  }
  d_numOnBits = dp_bits->count();
  return *this;
};

ExplicitBitVect ExplicitBitVect::operator+(const ExplicitBitVect &other) const {
  ExplicitBitVect ans(*this);
  return ans += other;
};

unsigned int ExplicitBitVect::getNumBits() const { return d_size; };
unsigned int ExplicitBitVect::getNumOnBits() const { return d_numOnBits; };
unsigned int ExplicitBitVect::getNumOffBits() const {
  return d_size - d_numOnBits;
};

// the contents of v are blown out
void ExplicitBitVect::getOnBits(IntVect &v) const {
  unsigned int nOn = getNumOnBits();
  if (!v.empty()) IntVect().swap(v);
  v.reserve(nOn);
  for (unsigned int i = 0; i < d_size; i++) {
    if ((bool)(*dp_bits)[i]) v.push_back(i);
  }
};

void ExplicitBitVect::_initForSize(unsigned int size) {
  d_size = size;
  delete dp_bits;
  dp_bits = new boost::dynamic_bitset<>(size);
  d_numOnBits = 0;
};

ExplicitBitVect::~ExplicitBitVect() {
  delete dp_bits;
  dp_bits = NULL;
};

std::string ExplicitBitVect::toString() const {
  // This function replaces the older approach (version 16) of writing the
  // on-bits to a string. The old version did no run-length encoding; it only
  // checked whether the bit indices fit in short ints and wrote the on-bits
  // as shorts, otherwise writing them all as ints. Here we do run-length
  // encoding, and the version number has been bumped to 32 as well. Only the
  // reader needs to take care of reading all legacy versions. In this scheme,
  // each bit number written to the string is also checked to see how many
  // bytes it needs.
  std::stringstream ss(std::ios_base::binary | std::ios_base::out |
                       std::ios_base::in);

  boost::int32_t tInt = ci_BITVECT_VERSION * -1;
  RDKit::streamWrite(ss, tInt);
  tInt = d_size;
  RDKit::streamWrite(ss, tInt);
  tInt = getNumOnBits();
  RDKit::streamWrite(ss, tInt);

  int prev = -1;
  unsigned int zeroes;
  for (unsigned int i = 0; i < d_size; i++) {
    if ((bool)(*dp_bits)[i]) {
      zeroes = i - prev - 1;
      RDKit::appendPackedIntToStream(ss, zeroes);
      prev = i;
    }
  }
  zeroes = d_size - prev - 1;
  RDKit::appendPackedIntToStream(ss, zeroes);
  std::string res(ss.str());
  return res;
}
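To make the run-length scheme in toString concrete: after the three header ints (version, size, number of on-bits), only the gaps between consecutive on-bits, plus the trailing gap, are written. A tiny standalone illustration of the gap arithmetic — not RDKit code, and it prints the values rather than packing them into a stream:

#include <iostream>
#include <vector>

int main() {
  const int size = 8;
  const std::vector<int> onBits = {2, 5};  // bits 2 and 5 are set
  int prev = -1;
  for (int bit : onBits) {
    std::cout << "gap: " << (bit - prev - 1) << "\n";  // prints 2, then 2
    prev = bit;
  }
  // zeroes after the last on-bit
  std::cout << "trailing: " << (size - prev - 1) << "\n";  // prints 2
}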
1
16080
Was this done automatically or by hand?
rdkit-rdkit
cpp
@@ -136,6 +136,18 @@ module Bolt end private :expand_targets + def remove_target(current_group, target, desired_group) + if current_group.name == desired_group + current_group.remove_target(target) + end + current_group.groups.each do |child_group| + # If target was in current group, remove it from all child groups + desired_group = child_group.name if current_group.name == desired_group + remove_target(child_group, target, desired_group) + end + end + private :remove_target + def add_target(current_group, target, desired_group) if current_group.name == desired_group current_group.add_target(target)
1
# frozen_string_literal: true require 'bolt/inventory/group2' require 'bolt/inventory/target' module Bolt class Inventory class Inventory2 attr_reader :targets, :plugins, :config # This uses "targets" in the message instead of "nodes" class WildcardError < Bolt::Error def initialize(target) super("Found 0 targets matching wildcard pattern #{target}", 'bolt.inventory/wildcard-error') end end def initialize(data, config = nil, plugins: nil) @logger = Logging.logger[self] # Config is saved to add config options to targets @config = config || Bolt::Config.default @data = data || {} @groups = Group2.new(@data.merge('name' => 'all'), plugins) @plugins = plugins @group_lookup = {} # The targets hash is the canonical source for all targets in inventory @targets = {} @groups.resolve_string_targets(@groups.target_aliases, @groups.all_targets) collect_groups end def validate @groups.validate end def version 2 end def target_implementation_class Bolt::Target2 end def collect_groups # Provide a lookup map for finding a group by name @group_lookup = @groups.collect_groups end def group_names @group_lookup.keys end def target_names @groups.all_targets end # alias for analytics alias node_names target_names def get_targets(targets) target_array = expand_targets(targets) if target_array.is_a? Array target_array.flatten.uniq(&:name) else [target_array] end end def get_target(target) target_array = get_targets(target) if target_array.count > 1 raise ValidationError.new("'#{target}' refers to #{target_array.count} targets", nil) end target_array.first end def data_hash { data: {}, target_hash: { target_vars: {}, target_facts: {}, target_features: {} }, config: @config.transport_data_get } end #### PRIVATE #### def group_data_for(target_name) @groups.group_collect(target_name) end # If target is a group name, expand it to the members of that group. # Else match against targets in inventory by name or alias. # If a wildcard string, error if no matches are found. # Else fall back to [target] if no matches are found. def resolve_name(target) if (group = @group_lookup[target]) group.all_targets else # Try to wildcard match targets in inventory # Ignore case because hostnames are generally case-insensitive regexp = Regexp.new("^#{Regexp.escape(target).gsub('\*', '.*?')}$", Regexp::IGNORECASE) targets = @groups.all_targets.select { |targ| targ =~ regexp } targets += @groups.target_aliases.select { |target_alias, _target| target_alias =~ regexp }.values if targets.empty? raise(WildcardError, target) if target.include?('*') [target] else targets end end end private :resolve_name def expand_targets(targets) if targets.is_a? Bolt::Target2 targets elsif targets.is_a? Array targets.map { |tish| expand_targets(tish) } elsif targets.is_a? String # Expand a comma-separated list targets.split(/[[:space:],]+/).reject(&:empty?).map do |name| ts = resolve_name(name) ts.map do |t| # If the target doesn't exist, evaluate it from the inventory. # Then return a Bolt::Target2. unless @targets.key?(t) @targets[t] = create_target_from_inventory(t) end Bolt::Target2.new(t, self) end end end end private :expand_targets def add_target(current_group, target, desired_group) if current_group.name == desired_group current_group.add_target(target) @groups.validate target.invalidate_group_cache! 
return true end # Recurse into child groups if this is not the desired group current_group.groups.each do |child_group| add_target(child_group, target, desired_group) end end private :add_target # Pull in a target definition from the inventory file and evaluate any # associated references. This is used when a target is resolved by # get_targets. def create_target_from_inventory(target_name) target_data = @groups.target_collect(target_name) || { 'uri' => target_name } target = Bolt::Inventory::Target.new(target_data, self) @targets[target.name] = target add_to_group([target], 'all') target end # Add a brand new target, overriding any existing target with the same # name. This method does not honor target config from the inventory. This # is used when Target.new is called from a plan. def create_target_from_plan(data) # If the target already exists, delete the old one and replace it with the new one; otherwise add the new target to the 'all' group new_target = Bolt::Inventory::Target.new(data, self) existing_target = @targets.key?(new_target.name) @targets[new_target.name] = new_target if existing_target clear_alia_from_group(@groups, new_target.name) else add_to_group([new_target], 'all') end if (aliases = new_target.target_alias) aliases = [aliases] if aliases.is_a?(String) unless aliases.is_a?(Array) msg = "Alias entry on #{new_target.name} must be a String or Array, not #{aliases.class}" raise ValidationError.new(msg, nil) end @groups.insert_alia(new_target.name, aliases) end new_target end def clear_alia_from_group(group, target_name) if group.all_target_names.include?(target_name) group.clear_alia(target_name) end group.groups.each do |grp| clear_alia_from_group(grp, target_name) end end def add_to_group(targets, desired_group) if group_names.include?(desired_group) targets.each do |target| if group_names.include?(target.name) raise ValidationError.new("Group #{target.name} conflicts with target of the same name", target.name) end # Add the inventory copy of the target add_target(@groups, @targets[target.name], desired_group) end else raise ValidationError.new("Group #{desired_group} does not exist in inventory", nil) end end def set_var(target, var_hash) @targets[target.name].set_var(var_hash) end def vars(target) @targets[target.name].vars end def add_facts(target, new_facts = {}) @targets[target.name].add_facts(new_facts) # rubocop:disable Style/GlobalVars $future ? target : facts(target) # rubocop:enable Style/GlobalVars end def facts(target) @targets[target.name].facts end def set_feature(target, feature, value = true) @targets[target.name].set_feature(feature, value) end def features(target) @targets[target.name].features end def plugin_hooks(target) @targets[target.name].plugin_hooks end def set_config(target, key_or_key_path, value) @targets[target.name].set_config(key_or_key_path, value) end def target_config(target) @targets[target.name].config end end end end
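One detail of resolve_name above worth calling out: it builds an anchored, case-insensitive regex from the user's pattern by escaping the whole string and then turning the escaped '*' back into a lazy wildcard. A rough C++ transliteration of that glob-to-regex idea (helper name hypothetical; the original is Ruby):

#include <iostream>
#include <regex>
#include <string>

// Escape regex metacharacters, then map the glob '*' to a lazy wildcard,
// mirroring Regexp.escape(target).gsub('\*', '.*?') in resolve_name.
std::string globToRegex(const std::string &glob) {
  static const std::string meta = "\\^$.|?*+()[]{}";
  std::string out = "^";
  for (char c : glob) {
    if (c == '*') {
      out += ".*?";
    } else if (meta.find(c) != std::string::npos) {
      out += '\\';
      out += c;
    } else {
      out += c;
    }
  }
  out += "$";
  return out;
}

int main() {
  std::regex re(globToRegex("web*.example.com"), std::regex::icase);
  std::cout << std::regex_match("WEB01.example.com", re) << "\n";  // 1
  std::cout << std::regex_match("db01.example.com", re) << "\n";   // 0
}

Case-insensitive matching here plays the same role as Regexp::IGNORECASE in the Ruby code: hostnames are generally case-insensitive.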
1
13374
This looks like it will only work for the first child, since after that we've overridden `desired_group`. Should we have a separate variable for this?
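A sketch of the separate-variable fix the comment suggests, transliterated into C++ with hypothetical Group/removal types (the method under review is Ruby):

#include <string>
#include <vector>

struct Group {
  std::string name;
  std::vector<Group> groups;
  void removeTarget(const std::string & /*target*/) { /* elided */ }
};

// Binding a fresh local per child keeps the first child's name from
// leaking into later siblings' recursive calls, which is the bug the
// review comment points out.
void removeTarget(Group &current, const std::string &target, const std::string &desired) {
  if (current.name == desired) {
    current.removeTarget(target);
  }
  for (Group &child : current.groups) {
    // If the target was in the current group, each child becomes a desired
    // group -- but compute that into a new local instead of overwriting
    // `desired` for the rest of the loop.
    const std::string child_desired = current.name == desired ? child.name : desired;
    removeTarget(child, target, child_desired);
  }
}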
puppetlabs-bolt
rb
@@ -3194,7 +3194,7 @@ void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDevice std::copy(std::istreambuf_iterator<char>(read_file), {}, std::back_inserter(validation_cache_data)); read_file.close(); } else { - LogInfo(core_checks->device, "VUID-NONE", + LogInfo(core_checks->device, "UNASSIGNED-cache-file-error", "Cannot open shader validation cache at %s for reading (it may not exist yet)", core_checks->validation_cache_path.c_str()); }
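Aside from the message-ID change, the hunk shows the istreambuf_iterator idiom for slurping a whole stream into a byte buffer in one statement. As a self-contained sketch of just that pattern (file name made up):

#include <algorithm>
#include <fstream>
#include <iostream>
#include <iterator>
#include <vector>

int main() {
  // Same pattern as in the hunk: copy from an istreambuf_iterator range
  // into a back_inserter; the `{}` is the default-constructed end iterator.
  std::ifstream read_file("validation_cache.bin", std::ios::binary);
  std::vector<char> validation_cache_data;
  if (read_file) {
    std::copy(std::istreambuf_iterator<char>(read_file), {}, std::back_inserter(validation_cache_data));
    std::cout << "read " << validation_cache_data.size() << " bytes\n";
  } else {
    std::cout << "cannot open cache for reading (it may not exist yet)\n";
  }
}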
1
/* Copyright (c) 2015-2022 The Khronos Group Inc. * Copyright (c) 2015-2022 Valve Corporation * Copyright (c) 2015-2022 LunarG, Inc. * Copyright (C) 2015-2022 Google Inc. * Modifications Copyright (C) 2020-2022 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Cody Northrop <[email protected]> * Author: Michael Lentine <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Ian Elliott <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Dustin Graves <[email protected]> * Author: Jeremy Hayes <[email protected]> * Author: Jon Ashburn <[email protected]> * Author: Karl Schultz <[email protected]> * Author: Mark Young <[email protected]> * Author: Mike Schuchardt <[email protected]> * Author: Mike Weiblen <[email protected]> * Author: Tony Barbour <[email protected]> * Author: John Zulauf <[email protected]> * Author: Shannon McPherson <[email protected]> * Author: Jeremy Kniager <[email protected]> * Author: Tobias Hector <[email protected]> * Author: Jeremy Gebben <[email protected]> */ #include <algorithm> #include <array> #include <assert.h> #include <cmath> #include <fstream> #include <iostream> #include <list> #include <map> #include <memory> #include <mutex> #include <set> #include <sstream> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <string> #include <valarray> #if defined(__linux__) || defined(__FreeBSD__) #include <unistd.h> #include <sys/types.h> #endif #include "vk_loader_platform.h" #include "vk_enum_string_helper.h" #include "chassis.h" #include "convert_to_renderpass2.h" #include "core_validation.h" #include "buffer_validation.h" #include "shader_validation.h" #include "vk_layer_utils.h" #include "sync_utils.h" #include "sync_vuid_maps.h" // these templates are defined in buffer_validation.cpp so we need to pull in the explicit instantiations from there extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const VkImageMemoryBarrier *barrier); extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const VkImageMemoryBarrier2KHR *barrier); extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state, const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle, const VkImageMemoryBarrier &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const; extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state, const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle, const VkImageMemoryBarrier2KHR &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const; using std::max; 
using std::string; using std::stringstream; using std::unique_ptr; using std::vector; void CoreChecks::AddInitialLayoutintoImageLayoutMap(const IMAGE_STATE &image_state, GlobalImageLayoutMap &image_layout_map) { auto *range_map = GetLayoutRangeMap(image_layout_map, image_state); auto range_gen = subresource_adapter::RangeGenerator(image_state.subresource_encoder); for (; range_gen->non_empty(); ++range_gen) { range_map->insert(range_map->end(), std::make_pair(*range_gen, image_state.createInfo.initialLayout)); } } // Override base class, we have some extra work to do here void CoreChecks::InitDeviceValidationObject(bool add_obj, ValidationObject *inst_obj, ValidationObject *dev_obj) { if (add_obj) { ValidationStateTracker::InitDeviceValidationObject(add_obj, inst_obj, dev_obj); } } // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value. template <typename T1> bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object, const VulkanTypedHandle &typed_handle, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid<T1, SimpleErrorLocation>(mem_state, object, typed_handle, {api_name, error_code}); } template <typename T1, typename LocType> bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object, const VulkanTypedHandle &typed_handle, const LocType &location) const { bool result = false; auto type_name = object_string[typed_handle.type]; if (!mem_state) { result |= LogError(object, location.Vuid(), "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().", location.FuncName(), report_data->FormatHandle(typed_handle).c_str(), type_name + 2); } else if (mem_state->Destroyed()) { result |= LogError(object, location.Vuid(), "%s: %s used with no memory bound and previously bound memory was freed. 
Memory must not be freed " "prior to this operation.", location.FuncName(), report_data->FormatHandle(typed_handle).c_str()); } return result; } // Check to see if memory was ever bound to this image bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const Location &loc) const { using LocationAdapter = core_error::LocationVuidAdapter<sync_vuid_maps::GetImageBarrierVUIDFunctor>; return ValidateMemoryIsBoundToImage<LocationAdapter>(image_state, LocationAdapter(loc, sync_vuid_maps::ImageError::kNoMemory)); } bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const { return ValidateMemoryIsBoundToImage<SimpleErrorLocation>(image_state, SimpleErrorLocation(api_name, error_code)); } template <typename LocType> bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const LocType &location) const { bool result = false; if (image_state->create_from_swapchain != VK_NULL_HANDLE) { if (!image_state->bind_swapchain) { LogObjectList objlist(image_state->image()); objlist.add(image_state->create_from_swapchain); result |= LogError( objlist, location.Vuid(), "%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain " "includes VkBindImageMemorySwapchainInfoKHR.", location.FuncName(), report_data->FormatHandle(image_state->image()).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str()); } else if (image_state->create_from_swapchain != image_state->bind_swapchain->swapchain()) { LogObjectList objlist(image_state->image()); objlist.add(image_state->create_from_swapchain); objlist.add(image_state->bind_swapchain->Handle()); result |= LogError(objlist, location.Vuid(), "%s: %s is created by %s, but the image is bound by %s. 
The image should be created and bound by the same " "swapchain", location.FuncName(), report_data->FormatHandle(image_state->image()).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str(), report_data->FormatHandle(image_state->bind_swapchain->Handle()).c_str()); } } else if (image_state->IsExternalAHB()) { // TODO look into how to properly check for a valid bound memory for an external AHB } else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) { result |= VerifyBoundMemoryIsValid(image_state->MemState(), image_state->image(), image_state->Handle(), location); } return result; } // Check to see if memory was bound to this buffer bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name, const char *error_code) const { bool result = false; if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) { result |= VerifyBoundMemoryIsValid(buffer_state->MemState(), buffer_state->buffer(), buffer_state->Handle(), api_name, error_code); } return result; } // Check to see if memory was bound to this acceleration structure bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid(as_state->MemState(), as_state->acceleration_structure(), as_state->Handle(), api_name, error_code); } // Check to see if memory was bound to this acceleration structure bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE_KHR *as_state, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid(as_state->MemState(), as_state->acceleration_structure(), as_state->Handle(), api_name, error_code); } // Valid usage checks for a call to SetMemBinding(). // For NULL mem case, output warning // Make sure given object is in global object map // IF a previous binding existed, output validation error // Otherwise, add reference from objectInfo to memoryInfo // Add reference off of objInfo // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions. 
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const BINDABLE &mem_binding, const char *apiName) const { bool skip = false; // It's an error to bind an object to NULL memory if (mem != VK_NULL_HANDLE) { auto typed_handle = mem_binding.Handle(); if (mem_binding.sparse) { const char *error_code = nullptr; const char *handle_type = nullptr; if (typed_handle.type == kVulkanObjectTypeBuffer) { handle_type = "BUFFER"; if (strcmp(apiName, "vkBindBufferMemory()") == 0) { error_code = "VUID-vkBindBufferMemory-buffer-01030"; } else { error_code = "VUID-VkBindBufferMemoryInfo-buffer-01030"; } } else if (typed_handle.type == kVulkanObjectTypeImage) { handle_type = "IMAGE"; if (strcmp(apiName, "vkBindImageMemory()") == 0) { error_code = "VUID-vkBindImageMemory-image-01045"; } else { error_code = "VUID-VkBindImageMemoryInfo-image-01045"; } } else { // Unsupported object type assert(false); } LogObjectList objlist(mem); objlist.add(typed_handle); skip |= LogError(objlist, error_code, "In %s, attempting to bind %s to %s which was created with sparse memory flags " "(VK_%s_CREATE_SPARSE_*_BIT).", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(), handle_type); } const auto mem_info = Get<DEVICE_MEMORY_STATE>(mem); if (mem_info) { const auto *prev_binding = mem_binding.MemState(); if (prev_binding) { if (!prev_binding->Destroyed()) { const char *error_code = nullptr; if (typed_handle.type == kVulkanObjectTypeBuffer) { if (strcmp(apiName, "vkBindBufferMemory()") == 0) { error_code = "VUID-vkBindBufferMemory-buffer-01029"; } else { error_code = "VUID-VkBindBufferMemoryInfo-buffer-01029"; } } else if (typed_handle.type == kVulkanObjectTypeImage) { if (strcmp(apiName, "vkBindImageMemory()") == 0) { error_code = "VUID-vkBindImageMemory-image-01044"; } else { error_code = "VUID-VkBindImageMemoryInfo-image-01044"; } } else { // Unsupported object type assert(false); } LogObjectList objlist(mem); objlist.add(typed_handle); objlist.add(prev_binding->mem()); skip |= LogError(objlist, error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(), report_data->FormatHandle(prev_binding->mem()).c_str()); } else { LogObjectList objlist(mem); objlist.add(typed_handle); skip |= LogError(objlist, kVUID_Core_MemTrack_RebindObject, "In %s, attempting to bind %s to %s which was previous bound to memory that has " "since been freed. 
Memory bindings are immutable in " "Vulkan so this attempt to bind to new memory is not allowed.", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str()); } } } } return skip; } bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name, const char *error_code, bool optional = false) const { bool skip = false; if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) { skip |= LogError(device, error_code, "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.", cmd_name, parameter_name); } else if (queue_family_index_set.find(queue_family) == queue_family_index_set.end()) { skip |= LogError(device, error_code, "%s: %s (= %" PRIu32 ") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.", cmd_name, parameter_name, queue_family); } return skip; } // Validate the specified queue families against the families supported by the physical device that owns this device bool CoreChecks::ValidatePhysicalDeviceQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families, const char *cmd_name, const char *array_parameter_name, const char *vuid) const { bool skip = false; if (queue_families) { layer_data::unordered_set<uint32_t> set; for (uint32_t i = 0; i < queue_family_count; ++i) { std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]"; if (set.count(queue_families[i])) { skip |= LogError(device, vuid, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name, parameter_name.c_str(), queue_families[i], array_parameter_name); } else { set.insert(queue_families[i]); if (queue_families[i] == VK_QUEUE_FAMILY_IGNORED) { skip |= LogError( device, vuid, "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.", cmd_name, parameter_name.c_str()); } else if (queue_families[i] >= physical_device_state->queue_family_known_count) { LogObjectList obj_list(physical_device); obj_list.add(device); skip |= LogError(obj_list, vuid, "%s: %s (= %" PRIu32 ") is not one of the queue families supported by the parent PhysicalDevice %s of this device %s.", cmd_name, parameter_name.c_str(), queue_families[i], report_data->FormatHandle(physical_device).c_str(), report_data->FormatHandle(device).c_str()); } } } } return skip; } // Check object status for selected flag state bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, const char *fail_msg, const char *msg_code) const { if (!(pNode->status & status_mask)) { return LogError(pNode->commandBuffer(), msg_code, "%s: %s.", report_data->FormatHandle(pNode->commandBuffer()).c_str(), fail_msg); } return false; } // Return true if for a given PSO, the given state enum is dynamic, else return false bool CoreChecks::IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) const { if (pPipeline && (pPipeline->GetPipelineType() == VK_PIPELINE_BIND_POINT_GRAPHICS) && pPipeline->create_info.graphics.pDynamicState) { for (uint32_t i = 0; i < pPipeline->create_info.graphics.pDynamicState->dynamicStateCount; i++) { if (state == pPipeline->create_info.graphics.pDynamicState->pDynamicStates[i]) return true; } } return false; } // Validate state stored as flags at time of draw call bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed, const char *msg_code) const { bool result = false; if 
(pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) { result |= ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, "Dynamic line width state not set for this command buffer", msg_code); } const auto &create_info = pPipe->create_info.graphics; if (create_info.pRasterizationState && (create_info.pRasterizationState->depthBiasEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, "Dynamic depth bias state not set for this command buffer", msg_code); } if (pPipe->blend_constants_enabled) { result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, "Dynamic blend constants state not set for this command buffer", msg_code); } if (create_info.pDepthStencilState && (create_info.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, "Dynamic depth bounds state not set for this command buffer", msg_code); } if (create_info.pDepthStencilState && (create_info.pDepthStencilState->stencilTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET, "Dynamic stencil read mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, "Dynamic stencil write mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET, "Dynamic stencil reference state not set for this command buffer", msg_code); } if (indexed) { result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND, "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code); } if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) { const auto *line_state = LvlFindInChain<VkPipelineRasterizationLineStateCreateInfoEXT>(create_info.pRasterizationState->pNext); if (line_state && line_state->stippledLineEnable) { result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET, "Dynamic line stipple state not set for this command buffer", msg_code); } } return result; } bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *msg, const char *caller, const char *error_code) const { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not " "compatible with %u: %s.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), primary_attach, secondary_attach, msg); } bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *caller, const char *error_code) const { bool skip = false; const auto &primary_pass_ci = rp1_state->createInfo; const auto &secondary_pass_ci = rp2_state->createInfo; if (primary_pass_ci.attachmentCount <= primary_attach) { primary_attach = VK_ATTACHMENT_UNUSED; } if (secondary_pass_ci.attachmentCount <= secondary_attach) { secondary_attach = VK_ATTACHMENT_UNUSED; } if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == 
VK_ATTACHMENT_UNUSED) { return skip; } if (primary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The first is unused while the second is not.", caller, error_code); return skip; } if (secondary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The second is unused while the first is not.", caller, error_code); return skip; } if (primary_pass_ci.pAttachments[primary_attach].format != secondary_pass_ci.pAttachments[secondary_attach].format) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different formats.", caller, error_code); } if (primary_pass_ci.pAttachments[primary_attach].samples != secondary_pass_ci.pAttachments[secondary_attach].samples) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different samples.", caller, error_code); } if (primary_pass_ci.pAttachments[primary_attach].flags != secondary_pass_ci.pAttachments[secondary_attach].flags) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different flags.", caller, error_code); } return skip; } bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass, const char *caller, const char *error_code) const { bool skip = false; const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass]; const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass]; uint32_t max_input_attachment_count = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); for (uint32_t i = 0; i < max_input_attachment_count; ++i) { uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.inputAttachmentCount) { primary_input_attach = primary_desc.pInputAttachments[i].attachment; } if (i < secondary_desc.inputAttachmentCount) { secondary_input_attach = secondary_desc.pInputAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach, secondary_input_attach, caller, error_code); } uint32_t max_color_attachment_count = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount); for (uint32_t i = 0; i < max_color_attachment_count; ++i) { uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount) { primary_color_attach = primary_desc.pColorAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount) { secondary_color_attach = secondary_desc.pColorAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach, secondary_color_attach, caller, error_code); if (rp1_state->createInfo.subpassCount > 1) { uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) { primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount && 
secondary_desc.pResolveAttachments) { secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach, secondary_resolve_attach, caller, error_code); } } uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED; if (primary_desc.pDepthStencilAttachment) { primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment; } if (secondary_desc.pDepthStencilAttachment) { secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach, secondary_depthstencil_attach, caller, error_code); // Both renderpasses must agree on Multiview usage if (primary_desc.viewMask && secondary_desc.viewMask) { if (primary_desc.viewMask != secondary_desc.viewMask) { std::stringstream ss; ss << "For subpass " << subpass << ", they have a different viewMask. The first has view mask " << primary_desc.viewMask << " while the second has view mask " << secondary_desc.viewMask << "."; skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, ss.str().c_str(), caller, error_code); } } else if (primary_desc.viewMask) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The first uses Multiview (has non-zero viewMasks) while the second one does not.", caller, error_code); } else if (secondary_desc.viewMask) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The second uses Multiview (has non-zero viewMasks) while the first one does not.", caller, error_code); } return skip; } bool CoreChecks::LogInvalidPnextMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *msg, const char *caller, const char *error_code) const { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s: %s", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), msg); } // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible. // This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and // will then feed into this function bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller, const char *error_code) const { bool skip = false; // createInfo flags must be identical for the renderpasses to be compatible. 
if (rp1_state->createInfo.flags != rp2_state->createInfo.flags) { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); skip |= LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s with flags of %u and %s w/ " "%s with a flags of %u.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), rp1_state->createInfo.flags, type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), rp2_state->createInfo.flags); } if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); skip |= LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ " "%s with a subpassCount of %u.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), rp2_state->createInfo.subpassCount); } else { for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) { skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code); } } // Find an entry of the Fragment Density Map type in the pNext chain, if it exists const auto fdm1 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp1_state->createInfo.pNext); const auto fdm2 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp2_state->createInfo.pNext); // Both renderpasses must agree on usage of a Fragment Density Map type if (fdm1 && fdm2) { uint32_t primary_input_attach = fdm1->fragmentDensityMapAttachment.attachment; uint32_t secondary_input_attach = fdm2->fragmentDensityMapAttachment.attachment; skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach, secondary_input_attach, caller, error_code); } else if (fdm1) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The first uses a Fragment Density Map while the second one does not.", caller, error_code); } else if (fdm2) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The second uses a Fragment Density Map while the first one does not.", caller, error_code); } return skip; } // For given pipeline, return number of MSAA samples, or one if MSAA disabled static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) { if (pipe->create_info.graphics.pMultisampleState != NULL && VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->create_info.graphics.pMultisampleState->sType) { return pipe->create_info.graphics.pMultisampleState->rasterizationSamples; } return VK_SAMPLE_COUNT_1_BIT; } static void ListBits(std::ostream &s, uint32_t bits) { for (int i = 0; i < 32 && bits; i++) { if (bits & (1 << i)) { s << i; bits &= ~(1 << i); if (bits) { s << ","; } } } } std::string DynamicStateString(CBStatusFlags input_value) { std::string ret; int index = 0; while (input_value) { if (input_value & 1) { if (!ret.empty()) ret.append("|"); ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(1llu << index)))); } ++index; input_value >>= 1; } if (ret.empty()) ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(0)))); return ret; } // Validate draw-time state related to the PSO bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE 
*pCB, CMD_TYPE cmd_type, const PIPELINE_STATE *pPipeline) const { bool skip = false; const auto &current_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings; const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); const char *caller = CommandTypeString(cmd_type); if (pCB->activeRenderPass->use_dynamic_rendering) { if (pPipeline->rp_state->renderPass() != VK_NULL_HANDLE) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_06198, "%s: Currently bound pipeline %s must have been created with a VkGraphicsPipelineCreateInfo::renderPass equal to VK_NULL_HANDLE", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str()); } if (pPipeline->rp_state->dynamic_rendering_pipeline_create_info.viewMask != pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.viewMask) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_view_mask, "%s: Currently bound pipeline %s viewMask ([%" PRIu32 ") must be equal to pBeginRendering->viewMask ([%" PRIu32 ")", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), pPipeline->rp_state->dynamic_rendering_pipeline_create_info.viewMask, pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.viewMask); } if (pPipeline->rp_state->dynamic_rendering_pipeline_create_info.colorAttachmentCount != pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_color_count, "%s: Currently bound pipeline %s colorAttachmentCount ([%" PRIu32 ") must be equal to pBeginRendering->colorAttachmentCount ([%" PRIu32 ")", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), pPipeline->rp_state->dynamic_rendering_pipeline_create_info.colorAttachmentCount, pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount); } if (pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount > 0) { for (uint32_t i = 0; i < pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount; ++i) { if (pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[i].imageView != VK_NULL_HANDLE) { auto view_state = Get<IMAGE_VIEW_STATE>( pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[i].imageView); if (view_state->create_info.format != pPipeline->rp_state->dynamic_rendering_pipeline_create_info.pColorAttachmentFormats[i]) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_color_formats, "%s: Color attachment ([%" PRIu32 ") imageView format (%s) must match corresponding format in pipeline (%s)", caller, i, string_VkFormat(view_state->create_info.format), string_VkFormat(pPipeline->rp_state->dynamic_rendering_pipeline_create_info.pColorAttachmentFormats[i])); } } } } auto rendering_fragment_shading_rate_attachment_info = LvlFindInChain<VkRenderingFragmentShadingRateAttachmentInfoKHR>( pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pNext); if (rendering_fragment_shading_rate_attachment_info && (rendering_fragment_shading_rate_attachment_info->imageView != VK_NULL_HANDLE)) { if (!(pPipeline->rp_state->createInfo.flags & VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_fsr, "%s: Currently bound graphics pipeline %s must have been created with VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR", caller, 
report_data->FormatHandle(state.pipeline_state->pipeline()).c_str()); } } auto rendering_fragment_shading_rate_density_map = LvlFindInChain<VkRenderingFragmentDensityMapAttachmentInfoEXT>( pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pNext); if (rendering_fragment_shading_rate_density_map && (rendering_fragment_shading_rate_density_map->imageView != VK_NULL_HANDLE)) { if (!(pPipeline->rp_state->createInfo.flags & VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT)) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_fdm, "%s: Currently bound graphics pipeline %s must have been created with VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str()); } } auto p_attachment_sample_count_info_amd = LvlFindInChain<VkAttachmentSampleCountInfoAMD>(pPipeline->create_info.graphics.pNext); auto p_attachment_sample_count_info_nv = LvlFindInChain<VkAttachmentSampleCountInfoNV>(pPipeline->create_info.graphics.pNext); if ((p_attachment_sample_count_info_amd || p_attachment_sample_count_info_nv) && (pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount > 0)) { for (uint32_t i = 0; i < pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount; ++i) { if (pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[i].imageView != VK_NULL_HANDLE) { auto color_view_state = Get<IMAGE_VIEW_STATE>( pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[i].imageView); auto color_image_samples = Get<IMAGE_STATE>(color_view_state->create_info.image)->createInfo.samples; if (p_attachment_sample_count_info_amd) { if (color_image_samples != p_attachment_sample_count_info_amd->pColorAttachmentSamples[i]) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_color_sample, "%s: Color attachment (%" PRIu32 ") sample count (%s) must match corresponding VkAttachmentSampleCountInfoAMD sample count (%s)", caller, i, string_VkSampleCountFlagBits(color_image_samples), string_VkSampleCountFlagBits(p_attachment_sample_count_info_amd->pColorAttachmentSamples[i])); } } if (p_attachment_sample_count_info_nv) { if (color_image_samples != p_attachment_sample_count_info_nv->pColorAttachmentSamples[i]) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_color_sample, "%s: Color attachment (%" PRIu32 ") sample count (%s) must match corresponding VkAttachmentSampleCountInfoNV sample count (%s)", caller, i, string_VkSampleCountFlagBits(color_image_samples), string_VkSampleCountFlagBits(p_attachment_sample_count_info_nv->pColorAttachmentSamples[i])); } } if (color_image_samples != pPipeline->create_info.graphics.pMultisampleState->rasterizationSamples) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_multi_sample, "%s: Color attachment (%" PRIu32 ") sample count (%s) must match corresponding VkPipelineMultisampleStateCreateInfo sample count (%s)", caller, i, string_VkSampleCountFlagBits(color_image_samples), string_VkSampleCountFlagBits(pPipeline->create_info.graphics.pMultisampleState->rasterizationSamples)); } } } auto depth_view_state = Get<IMAGE_VIEW_STATE>(pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView); auto depth_image_samples = Get<IMAGE_STATE>(depth_view_state->create_info.image)->createInfo.samples; if (p_attachment_sample_count_info_amd) { if (depth_image_samples != 
p_attachment_sample_count_info_amd->depthStencilAttachmentSamples) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_depth_sample, "%s: Depth attachment sample count (%s) must match corresponding VkAttachmentSampleCountInfoAMD sample count (%s)", caller, string_VkSampleCountFlagBits(depth_image_samples), string_VkSampleCountFlagBits(p_attachment_sample_count_info_amd->depthStencilAttachmentSamples)); } } if (p_attachment_sample_count_info_nv) { if (depth_image_samples != p_attachment_sample_count_info_nv->depthStencilAttachmentSamples) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_depth_sample, "%s: Depth attachment sample count (%s) must match corresponding VkAttachmentSampleCountInfoNV sample count (%s)", caller, string_VkSampleCountFlagBits(depth_image_samples), string_VkSampleCountFlagBits(p_attachment_sample_count_info_nv->depthStencilAttachmentSamples)); } } auto stencil_view_state = Get<IMAGE_VIEW_STATE>(pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView); auto stencil_image_samples = Get<IMAGE_STATE>(stencil_view_state->create_info.image)->createInfo.samples; if (p_attachment_sample_count_info_amd) { if (stencil_image_samples != p_attachment_sample_count_info_amd->depthStencilAttachmentSamples) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_stencil_sample, "%s: Stencil attachment sample count (%s) must match corresponding VkAttachmentSampleCountInfoAMD sample count (%s)", caller, string_VkSampleCountFlagBits(stencil_image_samples), string_VkSampleCountFlagBits(p_attachment_sample_count_info_amd->depthStencilAttachmentSamples)); } } if (p_attachment_sample_count_info_nv) { if (stencil_image_samples != p_attachment_sample_count_info_nv->depthStencilAttachmentSamples) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_stencil_sample, "%s: Stencil attachment sample count (%s) must match corresponding VkAttachmentSampleCountInfoNV sample count (%s)", caller, string_VkSampleCountFlagBits(stencil_image_samples), string_VkSampleCountFlagBits(p_attachment_sample_count_info_nv->depthStencilAttachmentSamples)); } } } if (!p_attachment_sample_count_info_amd && !p_attachment_sample_count_info_nv) { if ((pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment != nullptr) && (pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView != VK_NULL_HANDLE)) { const auto& depth_view_state = Get<IMAGE_VIEW_STATE>(pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView); const auto& depth_image_samples = Get<IMAGE_STATE>(depth_view_state->create_info.image)->createInfo.samples; if (depth_image_samples != pPipeline->create_info.graphics.pMultisampleState->rasterizationSamples) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_06189, "%s: Depth attachment sample count (%s) must match corresponding VkPipelineMultisampleStateCreateInfo::rasterizationSamples count (%s)", caller, string_VkSampleCountFlagBits(depth_image_samples), string_VkSampleCountFlagBits(pPipeline->create_info.graphics.pMultisampleState->rasterizationSamples)); } } if ((pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment != nullptr) && (pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment->imageView != VK_NULL_HANDLE)) { const auto& stencil_view_state = Get<IMAGE_VIEW_STATE>(pCB->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment->imageView); const auto& 
stencil_image_samples = Get<IMAGE_STATE>(stencil_view_state->create_info.image)->createInfo.samples; if (stencil_image_samples != pPipeline->create_info.graphics.pMultisampleState->rasterizationSamples) { skip |= LogError(pCB->commandBuffer(), vuid.dynamic_rendering_06190, "%s: Stencil attachment sample count (%s) must match corresponding VkPipelineMultisampleStateCreateInfo::rasterizationSamples count (%s)", caller, string_VkSampleCountFlagBits(stencil_image_samples), string_VkSampleCountFlagBits(pPipeline->create_info.graphics.pMultisampleState->rasterizationSamples)); } } } } // Verify vertex & index buffer for unprotected command buffer. // Because vertex & index buffer is read only, it doesn't need to care protected command buffer case. if (enabled_features.core11.protectedMemory == VK_TRUE) { for (const auto &buffer_binding : current_vtx_bfr_binding_info) { if (buffer_binding.buffer_state && !buffer_binding.buffer_state->Destroyed()) { skip |= ValidateProtectedBuffer(pCB, buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer, "Buffer is vertex buffer"); } } if (pCB->index_buffer_binding.buffer_state && !pCB->index_buffer_binding.buffer_state->Destroyed()) { skip |= ValidateProtectedBuffer(pCB, pCB->index_buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer, "Buffer is index buffer"); } } // Verify if using dynamic state setting commands that it doesn't set up in pipeline CBStatusFlags invalid_status = CBSTATUS_ALL_STATE_SET & ~(pCB->dynamic_status | pCB->static_status); if (invalid_status) { std::string dynamic_states = DynamicStateString(invalid_status); LogObjectList objlist(pCB->commandBuffer()); objlist.add(pPipeline->pipeline()); skip |= LogError(objlist, vuid.dynamic_state_setting_commands, "%s: %s doesn't set up %s, but it calls the related dynamic state setting commands", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), dynamic_states.c_str()); } // Verify vertex binding if (pPipeline->vertex_binding_descriptions_.size() > 0) { for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) { const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding; if (current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) { skip |= LogError(pCB->commandBuffer(), vuid.vertex_binding, "%s: %s expects that this Command Buffer's vertex binding Index %u should be set via " "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at " "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), vertex_binding, i, vertex_binding); } else if ((current_vtx_bfr_binding_info[vertex_binding].buffer_state == nullptr) && !enabled_features.robustness2_features.nullDescriptor) { skip |= LogError(pCB->commandBuffer(), vuid.vertex_binding_null, "%s: Vertex binding %d must not be VK_NULL_HANDLE %s expects that this Command Buffer's vertex " "binding Index %u should be set via " "vkCmdBindVertexBuffers. 
This is because VkVertexInputBindingDescription struct at " "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.", caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), vertex_binding, i, vertex_binding); } } // Verify vertex attribute address alignment for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) { const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i]; const auto vertex_binding = attribute_description.binding; const auto attribute_offset = attribute_description.offset; const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding); if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) && (vertex_binding < current_vtx_bfr_binding_info.size()) && ((current_vtx_bfr_binding_info[vertex_binding].buffer_state) || enabled_features.robustness2_features.nullDescriptor)) { auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride; if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)) { vertex_buffer_stride = static_cast<uint32_t>(current_vtx_bfr_binding_info[vertex_binding].stride); uint32_t attribute_binding_extent = attribute_description.offset + FormatElementSize(attribute_description.format); if (vertex_buffer_stride != 0 && vertex_buffer_stride < attribute_binding_extent) { skip |= LogError(pCB->commandBuffer(), "VUID-vkCmdBindVertexBuffers2EXT-pStrides-06209", "The pStrides[%u] (%u) parameter in the last call to vkCmdBindVertexBuffers2EXT is not 0 " "and less than the extent of the binding for attribute %zu (%u).", vertex_binding, vertex_buffer_stride, i, attribute_binding_extent); } } const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset; // Use 1 as vertex/instance index to use buffer stride as well const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset; VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i]; if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) { LogObjectList objlist(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer()); objlist.add(state.pipeline_state->pipeline()); skip |= LogError( objlist, vuid.vertex_binding_attribute, "%s: Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER ", %s,from of %s and vertex %s.", caller, i, string_VkFormat(attribute_description.format), report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer()).c_str()); } } else { LogObjectList objlist(pCB->commandBuffer()); objlist.add(state.pipeline_state->pipeline()); skip |= LogError(objlist, vuid.vertex_binding_attribute, "%s: binding #%" PRIu32 " in pVertexAttributeDescriptions of %s is invalid in vkCmdBindVertexBuffers of %s.", caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str()); } } } // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count. // Skip check if rasterization is disabled, if there is no viewport, or if viewport/scissors are being inherited. 
bool dyn_viewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); const auto &create_info = pPipeline->create_info.graphics; if ((!create_info.pRasterizationState || (create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) && create_info.pViewportState && pCB->inheritedViewportDepths.size() == 0) { bool dyn_scissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); // NB (akeley98): Current validation layers do not detect the error where vkCmdSetViewport (or scissor) was called, but // the dynamic state set is overwritten by binding a graphics pipeline with static viewport (scissor) state. // This condition be detected by checking trashedViewportMask & viewportMask (trashedScissorMask & scissorMask) is // nonzero in the range of bits needed by the pipeline. if (dyn_viewport) { const auto required_viewports_mask = (1 << create_info.pViewportState->viewportCount) - 1; const auto missing_viewport_mask = ~pCB->viewportMask & required_viewports_mask; if (missing_viewport_mask) { std::stringstream ss; ss << caller << ": Dynamic viewport(s) "; ListBits(ss, missing_viewport_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport()."; skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str()); } } if (dyn_scissor) { const auto required_scissor_mask = (1 << create_info.pViewportState->scissorCount) - 1; const auto missing_scissor_mask = ~pCB->scissorMask & required_scissor_mask; if (missing_scissor_mask) { std::stringstream ss; ss << caller << ": Dynamic scissor(s) "; ListBits(ss, missing_scissor_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor()."; skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str()); } } bool dyn_viewport_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT); bool dyn_scissor_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT); // VUID {refpage}-viewportCount-03417 if (dyn_viewport_count && !dyn_scissor_count) { const auto required_viewport_mask = (1 << create_info.pViewportState->scissorCount) - 1; const auto missing_viewport_mask = ~pCB->viewportWithCountMask & required_viewport_mask; if (missing_viewport_mask) { std::stringstream ss; ss << caller << ": Dynamic viewport with count "; ListBits(ss, missing_viewport_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewportWithCountEXT()."; skip |= LogError(device, vuid.viewport_count, "%s", ss.str().c_str()); } } // VUID {refpage}-scissorCount-03418 if (dyn_scissor_count && !dyn_viewport_count) { const auto required_scissor_mask = (1 << create_info.pViewportState->viewportCount) - 1; const auto missing_scissor_mask = ~pCB->scissorWithCountMask & required_scissor_mask; if (missing_scissor_mask) { std::stringstream ss; ss << caller << ": Dynamic scissor with count "; ListBits(ss, missing_scissor_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissorWithCountEXT()."; skip |= LogError(device, vuid.scissor_count, "%s", ss.str().c_str()); } } // VUID {refpage}-viewportCount-03419 if (dyn_scissor_count && dyn_viewport_count) { if (pCB->viewportWithCountMask != pCB->scissorWithCountMask) { std::stringstream ss; ss << caller << ": Dynamic viewport and scissor with count "; ListBits(ss, pCB->viewportWithCountMask ^ pCB->scissorWithCountMask); ss << " are used by pipeline state object, but were not provided via matching calls to " "vkCmdSetViewportWithCountEXT and 
vkCmdSetScissorWithCountEXT()."; skip |= LogError(device, vuid.viewport_scissor_count, "%s", ss.str().c_str()); } } } // If inheriting viewports, verify that not using more than inherited. if (pCB->inheritedViewportDepths.size() != 0 && dyn_viewport) { uint32_t viewport_count = create_info.pViewportState->viewportCount; uint32_t max_inherited = uint32_t(pCB->inheritedViewportDepths.size()); if (viewport_count > max_inherited) { skip |= LogError(device, vuid.dynamic_state, "Pipeline requires more viewports (%u) than inherited (viewportDepthCount=%u).", unsigned(viewport_count), unsigned(max_inherited)); } } // Verify that any MSAA request in PSO matches sample# in bound FB // Verify that blend is enabled only if supported by subpasses image views format features // Skip the check if rasterization is disabled. if (!create_info.pRasterizationState || (create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline); if (pCB->activeRenderPass) { if (pCB->activeRenderPass->use_dynamic_rendering || pCB->activeRenderPass->use_dynamic_rendering_inherited) { // TODO: Mirror the below VUs but using dynamic rendering const auto dynamic_rendering_info = pCB->activeRenderPass->dynamic_rendering_begin_rendering_info; } else { const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr(); const VkSubpassDescription2 *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass]; uint32_t i; unsigned subpass_num_samples = 0; for (i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples); const auto *imageview_state = pCB->GetActiveAttachmentImageViewState(attachment); if (imageview_state != nullptr && attachment < pPipeline->create_info.graphics.pColorBlendState->attachmentCount) { if ((imageview_state->format_features & VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR) == 0 && pPipeline->create_info.graphics.pColorBlendState->pAttachments[i].blendEnable != VK_FALSE) { skip |= LogError(pPipeline->pipeline(), vuid.blend_enable, "%s: Image view's format features of the color attachment (%" PRIu32 ") of the active subpass do not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT " "bit, but active pipeline's pAttachments[%" PRIu32 "].blendEnable is not VK_FALSE.", caller, attachment, attachment); } } } } if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples); } if (!(IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples) || IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples)) && ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) { LogObjectList objlist(pPipeline->pipeline()); objlist.add(pCB->activeRenderPass->renderPass()); skip |= LogError(objlist, vuid.rasterization_samples, "%s: In %s the sample count is %s while the current %s has %s and they need to be the same.", caller, report_data->FormatHandle(pPipeline->pipeline()).c_str(), string_VkSampleCountFlagBits(pso_num_samples), report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str(), 
string_VkSampleCountFlags(static_cast<VkSampleCountFlags>(subpass_num_samples)).c_str()); } } } else { skip |= LogError(pPipeline->pipeline(), kVUID_Core_DrawState_NoActiveRenderpass, "%s: No active render pass found at draw-time in %s!", caller, report_data->FormatHandle(pPipeline->pipeline()).c_str()); } } // Verify that PSO creation renderPass is compatible with active renderPass if (pCB->activeRenderPass && !pCB->activeRenderPass->use_dynamic_rendering) { // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted if (pCB->activeRenderPass->renderPass() != pPipeline->rp_state->renderPass()) { // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass.get(), "pipeline state object", pPipeline->rp_state.get(), caller, vuid.render_pass_compatible); } if (pPipeline->create_info.graphics.subpass != pCB->activeSubpass) { skip |= LogError(pPipeline->pipeline(), vuid.subpass_index, "%s: Pipeline was built for subpass %u but used in subpass %u.", caller, pPipeline->create_info.graphics.subpass, pCB->activeSubpass); } // Check if depth stencil attachment was created with sample location compatible bit if (pPipeline->sample_location_enabled == VK_TRUE) { const safe_VkAttachmentReference2 *ds_attachment = pCB->activeRenderPass->createInfo.pSubpasses[pCB->activeSubpass].pDepthStencilAttachment; const FRAMEBUFFER_STATE *fb_state = pCB->activeFramebuffer.get(); if ((ds_attachment != nullptr) && (fb_state != nullptr)) { const uint32_t attachment = ds_attachment->attachment; if (attachment != VK_ATTACHMENT_UNUSED) { const auto *imageview_state = pCB->GetActiveAttachmentImageViewState(attachment); if (imageview_state != nullptr) { const auto *image_state = imageview_state->image_state.get(); if (image_state != nullptr) { if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) == 0) { skip |= LogError(pPipeline->pipeline(), vuid.sample_location, "%s: sampleLocationsEnable is true for the pipeline, but the subpass (%u) depth " "stencil attachment's VkImage was not created with " "VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT.", caller, pCB->activeSubpass); } } } } } } } skip |= ValidateStatus(pCB, CBSTATUS_PATCH_CONTROL_POINTS_SET, "Dynamic patch control points not set for this command buffer", vuid.patch_control_points); skip |= ValidateStatus(pCB, CBSTATUS_RASTERIZER_DISCARD_ENABLE_SET, "Dynamic rasterizer discard enable not set for this command buffer", vuid.rasterizer_discard_enable); skip |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_ENABLE_SET, "Dynamic depth bias enable not set for this command buffer", vuid.depth_bias_enable); skip |= ValidateStatus(pCB, CBSTATUS_LOGIC_OP_SET, "Dynamic state logicOp not set for this command buffer", vuid.logic_op); skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_RESTART_ENABLE_SET, "Dynamic primitive restart enable not set for this command buffer", vuid.primitive_restart_enable); skip |= ValidateStatus(pCB, CBSTATUS_VERTEX_INPUT_BINDING_STRIDE_SET, "Dynamic vertex input binding stride not set for this command buffer", vuid.vertex_input_binding_stride); skip |= ValidateStatus(pCB, CBSTATUS_VERTEX_INPUT_SET, "Dynamic vertex input not set for this command buffer", vuid.vertex_input); // VUID {refpage}-primitiveTopology-03420 skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_TOPOLOGY_SET, "Dynamic primitive topology state not set for this command 
buffer", vuid.primitive_topology); if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)) { bool compatible_topology = false; switch (create_info.pInputAssemblyState->topology) { case VK_PRIMITIVE_TOPOLOGY_POINT_LIST: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_POINT_LIST: compatible_topology = true; break; default: break; } break; case VK_PRIMITIVE_TOPOLOGY_LINE_LIST: case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP: case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY: case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_LINE_LIST: case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP: case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY: case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY: compatible_topology = true; break; default: break; } break; case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY: compatible_topology = true; break; default: break; } break; case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST: compatible_topology = true; break; default: break; } break; default: break; } if (!compatible_topology) { skip |= LogError(pPipeline->pipeline(), vuid.primitive_topology, "%s: the last primitive topology %s state set by vkCmdSetPrimitiveTopologyEXT is " "not compatible with the pipeline topology %s.", caller, string_VkPrimitiveTopology(pCB->primitiveTopology), string_VkPrimitiveTopology(pPipeline->create_info.graphics.pInputAssemblyState->topology)); } } if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) { skip |= ValidateGraphicsPipelineShaderDynamicState(pPipeline, pCB, caller, vuid); } return skip; } // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to // pipelineLayout[layoutIndex] static bool VerifySetLayoutCompatibility(const debug_report_data *report_data, const cvdescriptorset::DescriptorSet *descriptor_set, PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex, string &errorMsg) { auto num_sets = pipeline_layout->set_layouts.size(); if (layoutIndex >= num_sets) { stringstream error_str; error_str << report_data->FormatHandle(pipeline_layout->layout()) << ") only contains " << num_sets << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index " << layoutIndex; errorMsg = error_str.str(); return false; } if (descriptor_set->IsPushDescriptor()) return true; auto layout_node = pipeline_layout->set_layouts[layoutIndex].get(); return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(), &errorMsg); } // Validate overall state at the time of a draw call bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed, const VkPipelineBindPoint bind_point) const { const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); const char *function = CommandTypeString(cmd_type); const auto lv_bind_point = ConvertToLvlBindPoint(bind_point); const auto 
&state = cb_node->lastBound[lv_bind_point]; const auto *pipe = state.pipeline_state; if (nullptr == pipe) { return LogError(cb_node->commandBuffer(), vuid.pipeline_bound, "Must not call %s on this command buffer while there is no %s pipeline bound.", function, bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR ? "RayTracing" : bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute"); } bool result = false; if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) { // First check flag states result |= ValidateDrawStateFlags(cb_node, pipe, indexed, vuid.dynamic_state); if (cb_node->activeRenderPass && cb_node->activeFramebuffer) { // Verify attachments for unprotected/protected command buffer. if (enabled_features.core11.protectedMemory == VK_TRUE && cb_node->active_attachments) { uint32_t i = 0; for (const auto &view_state : *cb_node->active_attachments.get()) { const auto &subpass = cb_node->active_subpasses->at(i); if (subpass.used && view_state && !view_state->Destroyed()) { std::string image_desc = "Image is "; image_desc.append(string_VkImageUsageFlagBits(subpass.usage)); // Because inputAttachment is read only, it doesn't need to care protected command buffer case. // Some CMD_TYPE could not be protected. See VUID 02711. if (subpass.usage != VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT && vuid.protected_command_buffer != kVUIDUndefined) { result |= ValidateUnprotectedImage(cb_node, view_state->image_state.get(), function, vuid.protected_command_buffer, image_desc.c_str()); } result |= ValidateProtectedImage(cb_node, view_state->image_state.get(), function, vuid.unprotected_command_buffer, image_desc.c_str()); } ++i; } } } } // Now complete other state checks string error_string; auto const &pipeline_layout = pipe->pipeline_layout.get(); // Check if the current pipeline is compatible for the maximum used set with the bound sets. 
if (pipe->active_slots.size() > 0 && !CompatForSet(pipe->max_active_slot, state, pipeline_layout->compat_for_set)) { LogObjectList objlist(pipe->pipeline()); objlist.add(pipeline_layout->layout()); objlist.add(state.pipeline_layout); result |= LogError(objlist, vuid.compatible_pipeline, "%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32 " with bound descriptor sets, last bound with %s", CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline()).c_str(), report_data->FormatHandle(pipeline_layout->layout()).c_str(), pipe->max_active_slot, report_data->FormatHandle(state.pipeline_layout).c_str()); } for (const auto &set_binding_pair : pipe->active_slots) { uint32_t set_index = set_binding_pair.first; // If a valid set is not bound, throw an error if ((state.per_set.size() <= set_index) || (!state.per_set[set_index].bound_descriptor_set)) { result |= LogError(cb_node->commandBuffer(), kVUID_Core_DrawState_DescriptorSetNotBound, "%s(): %s uses set #%u but that set is not bound.", CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline()).c_str(), set_index); } else if (!VerifySetLayoutCompatibility(report_data, state.per_set[set_index].bound_descriptor_set, pipeline_layout, set_index, error_string)) { // Set is bound but not compatible w/ overlapping pipeline_layout from PSO VkDescriptorSet set_handle = state.per_set[set_index].bound_descriptor_set->GetSet(); LogObjectList objlist(set_handle); objlist.add(pipeline_layout->layout()); result |= LogError(objlist, kVUID_Core_DrawState_PipelineLayoutsIncompatible, "%s(): %s bound as set #%u is not compatible with overlapping %s due to: %s", CommandTypeString(cmd_type), report_data->FormatHandle(set_handle).c_str(), set_index, report_data->FormatHandle(pipeline_layout->layout()).c_str(), error_string.c_str()); } else { // Valid set is bound and layout compatible, validate that it's updated // Pull the set node const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[set_index].bound_descriptor_set; // Validate the draw-time state for this descriptor set std::string err_str; // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks. // Here, the currently bound pipeline determines whether an image validation check is redundant... // for images, as the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline. cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second); const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pipe); // We can skip validating the descriptor set if "nothing" has changed since the last validation. // Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are // any dynamic descriptors, always revalidate rather than caching the values. We currently only // apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the // binding_req_map which could potentially be expensive.
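// In short, the cached validation result is reused only when all of the following hold: same descriptor set
// object, same set contents (change count), no relevant image layout changes, no dynamic offsets, and the
// new binding requirements are a subset of those already validated; anything else forces revalidation below.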
bool descriptor_set_changed = !reduced_map.IsManyDescriptors() || // Revalidate each time if the set has dynamic offsets state.per_set[set_index].dynamicOffsets.size() > 0 || // Revalidate if descriptor set (or contents) has changed state.per_set[set_index].validated_set != descriptor_set || state.per_set[set_index].validated_set_change_count != descriptor_set->GetChangeCount() || (!disabled[image_layout_validation] && state.per_set[set_index].validated_set_image_layout_change_count != cb_node->image_layout_change_count); bool need_validate = descriptor_set_changed || // Revalidate if previous bindingReqMap doesn't include new bindingReqMap !std::includes(state.per_set[set_index].validated_set_binding_req_map.begin(), state.per_set[set_index].validated_set_binding_req_map.end(), binding_req_map.begin(), binding_req_map.end()); if (need_validate) { if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) { // Only validate the bindings that haven't already been validated BindingReqMap delta_reqs; std::set_difference(binding_req_map.begin(), binding_req_map.end(), state.per_set[set_index].validated_set_binding_req_map.begin(), state.per_set[set_index].validated_set_binding_req_map.end(), layer_data::insert_iterator<BindingReqMap>(delta_reqs, delta_reqs.begin())); result |= ValidateDrawState(descriptor_set, delta_reqs, state.per_set[set_index].dynamicOffsets, cb_node, cb_node->active_attachments.get(), cb_node->active_subpasses.get(), function, vuid); } else { result |= ValidateDrawState(descriptor_set, binding_req_map, state.per_set[set_index].dynamicOffsets, cb_node, cb_node->active_attachments.get(), cb_node->active_subpasses.get(), function, vuid); } } } } // Check general pipeline state that needs to be validated at drawtime if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) { result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pipe); } // Verify if push constants have been set // NOTE: Currently not checking whether active push constants are compatible with the active pipeline, nor whether the // "life times" of push constants are correct. // Discussion on validity of these checks can be found at https://gitlab.khronos.org/vulkan/vulkan/-/issues/2602. if (!cb_node->push_constant_data_ranges || (pipeline_layout->push_constant_ranges == cb_node->push_constant_data_ranges)) { for (const auto &stage : pipe->stage_state) { const auto *entrypoint = stage.module->FindEntrypointStruct(stage.create_info->pName, stage.create_info->stage); if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) { continue; } // Edge case where the shader uses push constants statically but vkCmdPushConstants was never called if (!cb_node->push_constant_data_ranges && !enabled_features.maintenance4_features.maintenance4) { LogObjectList objlist(cb_node->commandBuffer()); objlist.add(pipeline_layout->layout()); objlist.add(pipe->pipeline()); result |= LogError(objlist, vuid.push_constants_set, "%s(): Shader in %s uses push-constant statically but vkCmdPushConstants was not called yet for " "pipeline layout %s.", CommandTypeString(cmd_type), string_VkShaderStageFlags(stage.stage_flag).c_str(), report_data->FormatHandle(pipeline_layout->layout()).c_str()); } const auto it = cb_node->push_constant_data_update.find(stage.stage_flag); if (it == cb_node->push_constant_data_update.end()) { // This error has been printed in ValidatePushConstantUsage.
break; } } } return result; } bool CoreChecks::ValidateCmdRayQueryState(const CMD_BUFFER_STATE *cb_state, CMD_TYPE cmd_type, const VkPipelineBindPoint bind_point) const { bool skip = false; const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); const auto lv_bind_point = ConvertToLvlBindPoint(bind_point); const auto &state = cb_state->lastBound[lv_bind_point]; const auto *pipe = state.pipeline_state; bool ray_query_shader = false; if (nullptr != pipe) { if (bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR) { ray_query_shader = true; } else { // TODO - Loop through shader for RayQueryKHR for draw/dispatch commands } } if (cb_state->unprotected == false && ray_query_shader) { skip |= LogError(cb_state->commandBuffer(), vuid.ray_query_protected_cb, "%s(): can't use in protected command buffers for RayQuery operations.", CommandTypeString(cmd_type)); } return skip; } bool CoreChecks::ValidateGraphicsPipelineBlendEnable(const PIPELINE_STATE *pPipeline) const { bool skip = false; const auto& create_info = pPipeline->create_info.graphics; if (create_info.pColorBlendState) { const auto *subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[create_info.subpass]; uint32_t numberColorAttachments = (pPipeline->rp_state->use_dynamic_rendering) ? pPipeline->rp_state->dynamic_rendering_pipeline_create_info.colorAttachmentCount : subpass_desc->colorAttachmentCount; for (uint32_t i = 0; i < pPipeline->attachments.size() && i < numberColorAttachments; ++i) { VkFormatFeatureFlags2KHR format_features; if (pPipeline->rp_state->use_dynamic_rendering) { if (create_info.pColorBlendState->attachmentCount != numberColorAttachments) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06055", "Pipeline %s: VkPipelineRenderingCreateInfoKHR::colorAttachmentCount (%" PRIu32 ") must equal pColorBlendState->attachmentCount (%" PRIu32 ")", report_data->FormatHandle(pPipeline->pipeline()).c_str(), numberColorAttachments, create_info.pColorBlendState->attachmentCount); } } else { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; const auto attachment_desc = pPipeline->rp_state->createInfo.pAttachments[attachment]; format_features = GetPotentialFormatFeatures(attachment_desc.format); if (create_info.pRasterizationState && !create_info.pRasterizationState->rasterizerDiscardEnable && pPipeline->attachments[i].blendEnable && !(format_features & VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR)) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06041", "vkCreateGraphicsPipelines(): pipeline.pColorBlendState.pAttachments[%" PRIu32 "].blendEnable is VK_TRUE but format %s of the corresponding attachment description (subpass %" PRIu32 ", attachment %" PRIu32 ") does not support VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT.", i, string_VkFormat(attachment_desc.format), create_info.subpass, attachment); } } } } return skip; } bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const { bool skip = false; const auto pipeline = pPipelines[pipelineIndex].get(); const auto &create_info = pipeline->create_info.graphics; // If create derivative bit is set, check that we've specified a base // pipeline correctly, and that the base pipeline was created to allow // derivatives. 
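// A derivative names its base either by handle (basePipelineHandle != VK_NULL_HANDLE) or by index into the
// same pCreateInfos array (basePipelineIndex != -1); exactly one of the two must be given, which is why the
// check below requires the two conditions to differ (a logical XOR).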
if (create_info.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { std::shared_ptr<const PIPELINE_STATE> base_pipeline; if (!((create_info.basePipelineHandle != VK_NULL_HANDLE) ^ (create_info.basePipelineIndex != -1))) { // TODO: This check is a superset of VUID-VkGraphicsPipelineCreateInfo-flags-00724 and // TODO: VUID-VkGraphicsPipelineCreateInfo-flags-00725 skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo[%d]: exactly one of base pipeline index and handle must be specified", pipelineIndex); } else if (create_info.basePipelineIndex != -1) { if (create_info.basePipelineIndex >= pipelineIndex) { skip |= LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00720", "Invalid Pipeline CreateInfo[%d]: base pipeline must occur earlier in array than derivative pipeline.", pipelineIndex); } else { base_pipeline = pPipelines[create_info.basePipelineIndex]; } } else if (create_info.basePipelineHandle != VK_NULL_HANDLE) { base_pipeline = Get<PIPELINE_STATE>(create_info.basePipelineHandle); } if (base_pipeline && !(base_pipeline->create_info.graphics.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00721", "Invalid Pipeline CreateInfo[%d]: base pipeline does not allow derivatives.", pipelineIndex); } } // Check for portability errors if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) { if ((VK_FALSE == enabled_features.portability_subset_features.triangleFans) && (VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN == pipeline->topology_at_rasterizer)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-triangleFans-04452", "Invalid Pipeline CreateInfo[%d] (portability error): VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN is not supported", pipelineIndex); } // Validate vertex inputs for (const auto &desc : pipeline->vertex_binding_descriptions_) { const auto min_alignment = phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment; if ((desc.stride < min_alignment) || (min_alignment == 0) || ((desc.stride % min_alignment) != 0)) { skip |= LogError( device, "VUID-VkVertexInputBindingDescription-stride-04456", "Invalid Pipeline CreateInfo[%d] (portability error): Vertex input stride must be at least as large as and a " "multiple of VkPhysicalDevicePortabilitySubsetPropertiesKHR::minVertexInputBindingStrideAlignment.", pipelineIndex); } } // Validate vertex attributes if (VK_FALSE == enabled_features.portability_subset_features.vertexAttributeAccessBeyondStride) { for (const auto &attrib : pipeline->vertex_attribute_descriptions_) { const auto vertex_binding_map_it = pipeline->vertex_binding_to_index_map_.find(attrib.binding); if (vertex_binding_map_it != pipeline->vertex_binding_to_index_map_.cend()) { const auto& desc = pipeline->vertex_binding_descriptions_[vertex_binding_map_it->second]; if ((attrib.offset + FormatElementSize(attrib.format)) > desc.stride) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-vertexAttributeAccessBeyondStride-04457", "Invalid Pipeline CreateInfo[%d] (portability error): (attribute.offset + " "sizeof(vertex_description.format)) is larger than the vertex stride", pipelineIndex); } } } } // Validate polygon mode auto raster_state_ci = create_info.pRasterizationState; if ((VK_FALSE == enabled_features.portability_subset_features.pointPolygons) && raster_state_ci && (VK_FALSE == raster_state_ci->rasterizerDiscardEnable) && (VK_POLYGON_MODE_POINT == raster_state_ci->polygonMode)) { skip |= LogError(device, 
"VUID-VkPipelineRasterizationStateCreateInfo-pointPolygons-04458", "Invalid Pipeline CreateInfo[%d] (portability error): point polygons are not supported", pipelineIndex); } } return skip; } // UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function. bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const { bool skip = false; const auto &create_info = pPipeline->create_info.graphics; safe_VkSubpassDescription2 *subpass_desc = nullptr; if (!pPipeline->rp_state->use_dynamic_rendering) { // Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState // produces nonsense errors that confuse users. Other layers should already // emit errors for renderpass being invalid. subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[create_info.subpass]; if (create_info.subpass >= pPipeline->rp_state->createInfo.subpassCount) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06046", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Subpass index %u is out of range for this renderpass (0..%u).", pipelineIndex, create_info.subpass, pPipeline->rp_state->createInfo.subpassCount - 1); subpass_desc = nullptr; } } if (create_info.pColorBlendState != NULL) { const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = create_info.pColorBlendState; if (subpass_desc && color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06042", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: %s subpass %u has colorAttachmentCount of %u which doesn't " "match the pColorBlendState->attachmentCount of %u.", pipelineIndex, report_data->FormatHandle(pPipeline->rp_state->renderPass()).c_str(), create_info.subpass, subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount); } if (!enabled_features.core.independentBlend) { if (pPipeline->attachments.size() > 1) { const VkPipelineColorBlendAttachmentState *const attachments = &pPipeline->attachments[0]; for (size_t i = 1; i < pPipeline->attachments.size(); i++) { // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState // settings for all color attachments must be identical." 
VkPipelineColorBlendAttachmentState contains // only attachment state, so memcmp is best suited for the comparison if (memcmp(static_cast<const void *>(attachments), static_cast<const void *>(&attachments[i]), sizeof(attachments[0]))) { skip |= LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605", "Invalid Pipeline CreateInfo[%" PRIu32 "]: If independent blend feature not enabled, all elements of " "pAttachments must be identical.", pipelineIndex); break; } } } } if (!enabled_features.core.logicOp && (create_info.pColorBlendState->logicOpEnable != VK_FALSE)) { skip |= LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606", "Invalid Pipeline CreateInfo[%" PRIu32 "]: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.", pipelineIndex); } for (size_t i = 0; i < pPipeline->attachments.size(); i++) { if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor); } } if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor); } } if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor); } } if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if 
(!enabled_features.core.dualSrcBlend) { skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor); } } } auto color_write = lvl_find_in_chain<VkPipelineColorWriteCreateInfoEXT>(create_info.pColorBlendState->pNext); if (color_write) { if (color_write->attachmentCount != color_blend_state->attachmentCount) { skip |= LogError( device, "VUID-VkPipelineColorWriteCreateInfoEXT-attachmentCount-04802", "vkCreateGraphicsPipelines(): VkPipelineColorWriteCreateInfoEXT in the pNext chain of pPipelines[%" PRIu32 "].pColorBlendState has different attachmentCount (%" PRIu32 ") than pColorBlendState.attachmentCount (%" PRIu32 ").", pipelineIndex, color_write->attachmentCount, color_blend_state->attachmentCount); } if (!enabled_features.color_write_features.colorWriteEnable) { for (uint32_t i = 0; i < color_write->attachmentCount; ++i) { if (color_write->pColorWriteEnables[i] != VK_TRUE) { skip |= LogError(device, "VUID-VkPipelineColorWriteCreateInfoEXT-pAttachments-04801", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState pNext chain includes VkPipelineColorWriteCreateInfoEXT with " "pColorWriteEnables[%" PRIu32 "] = VK_FALSE, but colorWriteEnable is not enabled.", pipelineIndex, i); } } } } const auto *color_blend_advanced = LvlFindInChain<VkPipelineColorBlendAdvancedStateCreateInfoEXT>(create_info.pColorBlendState->pNext); if (color_blend_advanced) { if (!phys_dev_ext_props.blend_operation_advanced_props.advancedBlendCorrelatedOverlap && color_blend_advanced->blendOverlap != VK_BLEND_OVERLAP_UNCORRELATED_EXT) { skip |= LogError( device, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-blendOverlap-01426", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState pNext chain contains VkPipelineColorBlendAdvancedStateCreateInfoEXT structure with " "blendOverlap equal to %s, but " "VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT::advancedBlendCorrelatedOverlap is not supported.", pipelineIndex, string_VkBlendOverlapEXT(color_blend_advanced->blendOverlap)); } if (!phys_dev_ext_props.blend_operation_advanced_props.advancedBlendNonPremultipliedDstColor && color_blend_advanced->dstPremultiplied != VK_TRUE) { skip |= LogError( device, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-dstPremultiplied-01425", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState pNext chain contains VkPipelineColorBlendAdvancedStateCreateInfoEXT structure with " "dstPremultiplied equal to VK_FALSE, but " "VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT::advancedBlendNonPremultipliedDstColor is not supported.", pipelineIndex); } if (!phys_dev_ext_props.blend_operation_advanced_props.advancedBlendNonPremultipliedSrcColor && color_blend_advanced->srcPremultiplied != VK_TRUE) { skip |= LogError( device, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-srcPremultiplied-01424", "vkCreateGraphicsPipelines(): pPipelines[%" PRIu32 "].pColorBlendState pNext chain contains VkPipelineColorBlendAdvancedStateCreateInfoEXT structure with " "srcPremultiplied equal to VK_FALSE, but " "VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT::advancedBlendNonPremultipliedSrcColor is not supported.", pipelineIndex); } } } if 
(ValidateGraphicsPipelineShaderState(pPipeline)) { skip = true; } skip |= ValidateGraphicsPipelineBlendEnable(pPipeline); // Each shader's stage must be unique for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { if (pPipeline->active_shaders & stage) { const auto &states = pPipeline->stage_state; if (std::count_if(states.begin(), states.end(), [stage](const PipelineStageState &pss) { return stage == pss.stage_flag; }) > 1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00726", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Multiple shaders provided for stage %s", pipelineIndex, string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); } } } if (!enabled_features.core.geometryShader && (pPipeline->active_shaders & VK_SHADER_STAGE_GEOMETRY_BIT)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00704", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Geometry Shader not supported.", pipelineIndex); } if (!enabled_features.core.tessellationShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00705", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Tessellation Shader not supported.", pipelineIndex); } if (IsExtEnabled(device_extensions.vk_nv_mesh_shader)) { // VS or mesh is required if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-02096", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Vertex Shader or Mesh Shader required.", pipelineIndex); } // Can't mix mesh and VTG if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) && (pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02095", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Geometric shader stages must either be all mesh (mesh | task) " "or all VTG (vertex, tess control, tess eval, geom).", pipelineIndex); } } else { // VS is required if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00727", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Vertex Shader required.", pipelineIndex); } } if (!enabled_features.mesh_shader_features.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02091", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Mesh Shader not supported.", pipelineIndex); } if (!enabled_features.mesh_shader_features.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02092", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Task Shader not supported.", pipelineIndex); } // Either both or neither TC/TE shaders should be defined bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0; bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0; if (has_control && !has_eval) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729", "Invalid Pipeline CreateInfo[%" 
PRIu32 "] State: TE and TC shaders must be included or excluded as a pair.", pipelineIndex); } if (!has_control && has_eval) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: TE and TC shaders must be included or excluded as a pair.", pipelineIndex); } // Compute shaders should be specified independent of Gfx shaders if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00728", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Do not specify Compute Shader for Gfx Pipeline.", pipelineIndex); } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !create_info.pInputAssemblyState) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02098", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Missing pInputAssemblyState.", pipelineIndex); } // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines. // Mismatching primitive topology and tessellation fails graphics pipeline creation. if (has_control && has_eval && (!create_info.pInputAssemblyState || create_info.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for " "tessellation pipelines.", pipelineIndex); } if (create_info.pInputAssemblyState) { if (create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { if (!has_control || !has_eval) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-topology-00737", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid " "for tessellation pipelines.", pipelineIndex); } } if ((create_info.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) && (create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { if (IsExtEnabled(device_extensions.vk_ext_primitive_topology_list_restart)) { if (create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { if (!enabled_features.primitive_topology_list_restart_features.primitiveTopologyPatchListRestart) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-06253", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: topology is %s and primitiveRestartEnable is VK_TRUE and the " "primitiveTopologyPatchListRestart feature is not enabled.", pipelineIndex, string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology)); } } else if (!enabled_features.primitive_topology_list_restart_features.primitiveTopologyListRestart) { skip |= LogError( device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-06252", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: topology is %s and primitiveRestartEnable is VK_TRUE and the primitiveTopologyListRestart feature " "is not enabled.", pipelineIndex, 
string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology)); } } else { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.", pipelineIndex, string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology)); } } if ((enabled_features.core.geometryShader == VK_FALSE) && (create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: topology is %s and geometry shaders feature is not enabled. " "It is invalid.", pipelineIndex, string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology)); } if ((enabled_features.core.tessellationShader == VK_FALSE) && (create_info.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: topology is %s and tessellation shaders feature is not " "enabled. It is invalid.", pipelineIndex, string_VkPrimitiveTopology(create_info.pInputAssemblyState->topology)); } } // If a rasterization state is provided... if (create_info.pRasterizationState) { if ((create_info.pRasterizationState->depthClampEnable == VK_TRUE) && (!enabled_features.core.depthClamp)) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: the depthClamp device feature is disabled: the " "depthClampEnable member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.", pipelineIndex); } if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) && (create_info.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00754", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: the depthBiasClamp device feature is disabled: the " "depthBiasClamp member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the " "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled", pipelineIndex); } // If rasterization is enabled... 
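// (i.e. rasterizerDiscardEnable == VK_FALSE). When rasterization is discarded, the multisample,
// depth/stencil, and color blend state are ignored by the spec, so the checks below apply only when
// fragments can actually be produced.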
if (create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) { if ((create_info.pMultisampleState->alphaToOneEnable == VK_TRUE) && (!enabled_features.core.alphaToOne)) { skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: the alphaToOne device feature is disabled: the alphaToOneEnable " "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.", pipelineIndex); } // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure if (subpass_desc && subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (!create_info.pDepthStencilState) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06043", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: pDepthStencilState is NULL when rasterization is enabled " "and subpass uses a depth/stencil attachment.", pipelineIndex); } else if (create_info.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) { if (!enabled_features.core.depthBounds) { skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: the depthBounds device feature is disabled: the " "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be " "set to VK_FALSE.", pipelineIndex); } // The extension was not created with a feature bit, which prevents displaying the 2 variations of the VUIDs if (!IsExtEnabled(device_extensions.vk_ext_depth_range_unrestricted) && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS)) { const float minDepthBounds = create_info.pDepthStencilState->minDepthBounds; const float maxDepthBounds = create_info.pDepthStencilState->maxDepthBounds; // Also VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00755 if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: VK_EXT_depth_range_unrestricted extension " "is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is " "true, and pDepthStencilState::minDepthBounds (=%f) is not within the [0.0, 1.0] range.", pipelineIndex, minDepthBounds); } if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: VK_EXT_depth_range_unrestricted extension " "is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is " "true, and pDepthStencilState::maxDepthBounds (=%f) is not within the [0.0, 1.0] range.", pipelineIndex, maxDepthBounds); } } } } // If subpass uses color attachments, pColorBlendState must be a valid pointer if (subpass_desc) { uint32_t color_attachment_count = 0; for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) { if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { ++color_attachment_count; } } if (color_attachment_count > 0 && create_info.pColorBlendState == nullptr) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06044", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: pColorBlendState is NULL when rasterization is enabled and " "subpass uses color attachments.", pipelineIndex); } constexpr int num_bits = sizeof(subpass_desc->viewMask) * 
CHAR_BIT; std::bitset<num_bits> view_bits(subpass_desc->viewMask); uint32_t view_bits_count = static_cast<uint32_t>(view_bits.count()); if (view_bits_count > 1) { if (!enabled_features.multiview_features.multiviewTessellationShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06047", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: subpass has %" PRIu32 " bits set in viewMask and pStages includes tessellation shaders, but the " "VkPhysicalDeviceMultiviewFeatures::multiviewTessellationShader feature is not enabled.", pipelineIndex, view_bits_count); } if (!enabled_features.multiview_features.multiviewGeometryShader && pPipeline->active_shaders & VK_SHADER_STAGE_GEOMETRY_BIT) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06048", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: subpass has %" PRIu32 " bits set in viewMask and pStages includes geometry shader, but the " "VkPhysicalDeviceMultiviewFeatures::multiviewGeometryShader feature is not enabled.", pipelineIndex, view_bits_count); } } } } auto provoking_vertex_state_ci = lvl_find_in_chain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>(create_info.pRasterizationState->pNext); if (provoking_vertex_state_ci && provoking_vertex_state_ci->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT && !enabled_features.provoking_vertex_features.provokingVertexLast) { skip |= LogError( device, "VUID-VkPipelineRasterizationProvokingVertexStateCreateInfoEXT-provokingVertexMode-04883", "provokingVertexLast feature is not enabled."); } const auto rasterization_state_stream_ci = LvlFindInChain<VkPipelineRasterizationStateStreamCreateInfoEXT>( pPipeline->create_info.graphics.pRasterizationState->pNext); if (rasterization_state_stream_ci) { if (!enabled_features.transform_feedback_features.geometryStreams) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-geometryStreams-02324", "pCreateInfos[%" PRIu32 "].pRasterizationState pNext chain includes VkPipelineRasterizationStateStreamCreateInfoEXT, but " "geometryStreams feature is not enabled.", pipelineIndex); } else if (phys_dev_ext_props.transform_feedback_props.transformFeedbackRasterizationStreamSelect == VK_FALSE && rasterization_state_stream_ci->rasterizationStream != 0) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-rasterizationStream-02326", "VkPhysicalDeviceTransformFeedbackPropertiesEXT::transformFeedbackRasterizationStreamSelect is " "VK_FALSE, but pCreateInfos[%" PRIu32 "].pRasterizationState pNext chain includes VkPipelineRasterizationStateStreamCreateInfoEXT with " "rasterizationStream (%" PRIu32 ") not equal to 0.", pipelineIndex, rasterization_state_stream_ci->rasterizationStream); } else if (rasterization_state_stream_ci->rasterizationStream >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams) { skip |= LogError( device, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-rasterizationStream-02325", "pCreateInfos[%" PRIu32 "].pRasterizationState pNext chain includes VkPipelineRasterizationStateStreamCreateInfoEXT with " "rasterizationStream (%" PRIu32 ") not less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams (%" PRIu32 ").", pipelineIndex, rasterization_state_stream_ci->rasterizationStream, 
phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams); } } const auto rasterization_conservative_state_ci = LvlFindInChain<VkPipelineRasterizationConservativeStateCreateInfoEXT>(create_info.pRasterizationState->pNext); if (rasterization_conservative_state_ci) { if (rasterization_conservative_state_ci->extraPrimitiveOverestimationSize < 0.0f || rasterization_conservative_state_ci->extraPrimitiveOverestimationSize > phys_dev_ext_props.conservative_rasterization_props.maxExtraPrimitiveOverestimationSize) { skip |= LogError( device, "VUID-VkPipelineRasterizationConservativeStateCreateInfoEXT-extraPrimitiveOverestimationSize-01769", "pCreateInfos[%" PRIu32 "].pRasterizationState pNext chain includes VkPipelineRasterizationConservativeStateCreateInfoEXT with " "extraPrimitiveOverestimationSize (%f), which is not between 0.0 and " "VkPhysicalDeviceConservativeRasterizationPropertiesEXT::maxExtraPrimitiveOverestimationSize (%f).", pipelineIndex, rasterization_conservative_state_ci->extraPrimitiveOverestimationSize, phys_dev_ext_props.conservative_rasterization_props.maxExtraPrimitiveOverestimationSize); } } } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !create_info.pVertexInputState && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02097", "Invalid Pipeline CreateInfo[%" PRIu32 "] State: Missing pVertexInputState.", pipelineIndex); } auto vi = create_info.pVertexInputState; if (vi != NULL) { for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) { VkFormat format = vi->pVertexAttributeDescriptions[j].format; // Internal call to get format info. Still goes through layers, could potentially go directly to ICD. VkFormatProperties properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties); if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-format-00623", "vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32 "].pVertexInputState->vertexAttributeDescriptions[%d].format " "(%s) is not a supported vertex buffer format.", pipelineIndex, j, string_VkFormat(format)); } } } if (subpass_desc && create_info.pMultisampleState) { const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = create_info.pMultisampleState; auto accum_color_samples = [subpass_desc, pPipeline](uint32_t &samples) { for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } } }; if (!(IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples) || IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples))) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_num_samples = 0; accum_color_samples(subpass_num_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } // subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED. 
// Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED. if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757", "vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32 "].pMultisampleState->rasterizationSamples (%u) " "does not match the number of samples of the RenderPass color and/or depth attachment.", pipelineIndex, raster_samples); } } if (IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples)) { VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0); for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) { if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { max_sample_count = std::max( max_sample_count, pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples); } } if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { max_sample_count = std::max( max_sample_count, pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples); } if ((create_info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) && (max_sample_count != static_cast<VkSampleCountFlagBits>(0)) && (multisample_state->rasterizationSamples != max_sample_count)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505", "vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32 "].pMultisampleState->rasterizationSamples (%s) != max " "attachment samples (%s) used in subpass %u.", pipelineIndex, string_VkSampleCountFlagBits(multisample_state->rasterizationSamples), string_VkSampleCountFlagBits(max_sample_count), create_info.subpass); } } if (IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples)) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_color_samples = 0; accum_color_samples(subpass_color_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; const uint32_t subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); if (create_info.pDepthStencilState) { const bool ds_test_enabled = (create_info.pDepthStencilState->depthTestEnable == VK_TRUE) || (create_info.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) || (create_info.pDepthStencilState->stencilTestEnable == VK_TRUE); if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411", "vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32 "].pMultisampleState->rasterizationSamples (%u) " "does not match the number of samples of the RenderPass depth attachment (%u).", pipelineIndex, raster_samples, subpass_depth_samples); } } } if (IsPowerOfTwo(subpass_color_samples)) { if (raster_samples < subpass_color_samples) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412", "vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32 "].pMultisampleState->rasterizationSamples (%u) " "is not greater or equal to the number of samples of the RenderPass color attachment (%u).", pipelineIndex, raster_samples, subpass_color_samples); } if (multisample_state) { if ((raster_samples > 
subpass_color_samples) && (multisample_state->sampleShadingEnable == VK_TRUE)) { skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415", "vkCreateGraphicsPipelines: pCreateInfo[%" PRIu32 "].pMultisampleState->sampleShadingEnable must be " "VK_FALSE when " "pCreateInfo[%" PRIu32 "].pMultisampleState->rasterizationSamples (%u) is greater than the number of " "samples of the " "subpass color attachment (%u).", pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples); } const auto *coverage_modulation_state = LvlFindInChain<VkPipelineCoverageModulationStateCreateInfoNV>(multisample_state->pNext); if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) { if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) { skip |= LogError( device, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] VkPipelineCoverageModulationStateCreateInfoNV " "coverageModulationTableCount of %u is invalid.", pipelineIndex, coverage_modulation_state->coverageModulationTableCount); } } } } } if (IsExtEnabled(device_extensions.vk_nv_coverage_reduction_mode)) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_color_samples = 0; uint32_t subpass_depth_samples = 0; accum_color_samples(subpass_color_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } if (multisample_state && IsPowerOfTwo(subpass_color_samples) && (subpass_depth_samples == 0 || IsPowerOfTwo(subpass_depth_samples))) { const auto *coverage_reduction_state = LvlFindInChain<VkPipelineCoverageReductionStateCreateInfoNV>(multisample_state->pNext); if (coverage_reduction_state) { const VkCoverageReductionModeNV coverage_reduction_mode = coverage_reduction_state->coverageReductionMode; uint32_t combination_count = 0; std::vector<VkFramebufferMixedSamplesCombinationNV> combinations; DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count, nullptr); combinations.resize(combination_count); DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count, &combinations[0]); bool combination_found = false; for (const auto &combination : combinations) { if (coverage_reduction_mode == combination.coverageReductionMode && raster_samples == combination.rasterizationSamples && subpass_depth_samples == combination.depthStencilSamples && subpass_color_samples == combination.colorSamples) { combination_found = true; break; } } if (!combination_found) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] the specified combination of coverage " "reduction mode (%s), pMultisampleState->rasterizationSamples (%u), sample counts for " "the subpass color and depth/stencil attachments is not a valid combination returned by " "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.", pipelineIndex, string_VkCoverageReductionModeNV(coverage_reduction_mode), raster_samples); } } } } if (IsExtEnabled(device_extensions.vk_nv_fragment_coverage_to_color)) { 
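        // VK_NV_fragment_coverage_to_color lets the fragment coverage mask be written to a color attachment.
        // The checks below confirm that the pipeline's coverageToColorLocation names an attachment that actually
        // exists in the subpass and that the attachment uses one of the single-component integer formats the
        // extension requires (R8/R16/R32 UINT or SINT).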
        const auto coverage_to_color_state = LvlFindInChain<VkPipelineCoverageToColorStateCreateInfoNV>(multisample_state->pNext);
        if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
            bool attachment_is_valid = false;
            std::string error_detail;

            if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
                const auto &color_attachment_ref =
                    subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
                if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                    const auto &color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];

                    switch (color_attachment.format) {
                        case VK_FORMAT_R8_UINT:
                        case VK_FORMAT_R8_SINT:
                        case VK_FORMAT_R16_UINT:
                        case VK_FORMAT_R16_SINT:
                        case VK_FORMAT_R32_UINT:
                        case VK_FORMAT_R32_SINT:
                            attachment_is_valid = true;
                            break;
                        default: {
                            std::ostringstream str;
                            str << "references an attachment with an invalid format ("
                                << string_VkFormat(color_attachment.format) << ").";
                            error_detail = str.str();
                            break;
                        }
                    }
                } else {
                    std::ostringstream str;
                    str << "references an invalid attachment. The subpass pColorAttachments["
                        << coverage_to_color_state->coverageToColorLocation
                        << "].attachment has the value VK_ATTACHMENT_UNUSED.";
                    error_detail = str.str();
                }
            } else {
                std::ostringstream str;
                str << "references a non-existent attachment since the subpass colorAttachmentCount is "
                    << subpass_desc->colorAttachmentCount << ".";
                error_detail = str.str();
            }

            if (!attachment_is_valid) {
                skip |= LogError(device, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
                                 "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
                                 "].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
                                 "coverageToColorLocation = %" PRIu32 " %s",
                                 pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
            }
        }
    }

    if (IsExtEnabled(device_extensions.vk_ext_sample_locations)) {
        const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_state =
            LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(multisample_state->pNext);
        if (sample_location_state != nullptr) {
            if ((sample_location_state->sampleLocationsEnable == VK_TRUE) &&
                (IsDynamic(pPipeline, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) == false)) {
                const VkSampleLocationsInfoEXT sample_location_info = sample_location_state->sampleLocationsInfo;
                skip |= ValidateSampleLocationsInfo(&sample_location_info, "vkCreateGraphicsPipelines");
                const VkExtent2D grid_size = sample_location_info.sampleLocationGridSize;

                auto multisample_prop = LvlInitStruct<VkMultisamplePropertiesEXT>();
                DispatchGetPhysicalDeviceMultisamplePropertiesEXT(physical_device, multisample_state->rasterizationSamples,
                                                                  &multisample_prop);
                const VkExtent2D max_grid_size = multisample_prop.maxSampleLocationGridSize;

                // Note the order of "divide" in the spec text: "sampleLocationsInfo must evenly divide VkMultisamplePropertiesEXT"
                if (SafeModulo(max_grid_size.width, grid_size.width) != 0) {
                    skip |= LogError(
                        device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521",
                        "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32
                        "]: Because there is no dynamic state for Sample Location "
                        "and sampleLocationEnable is true, the "
                        "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.width (%u) "
                        "must evenly divide VkMultisamplePropertiesEXT::maxSampleLocationGridSize.width (%u).",
                        pipelineIndex, grid_size.width, max_grid_size.width);
                }
                if (SafeModulo(max_grid_size.height, grid_size.height) != 0) {
                    skip |= LogError(
                        device,
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: Because there is no dynamic state for Sample Location " "and sampleLocationEnable is true, the " "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.height (%u) " "must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.height (%u).", pipelineIndex, grid_size.height, max_grid_size.height); } if (sample_location_info.sampleLocationsPerPixel != multisample_state->rasterizationSamples) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01523", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: Because there is no dynamic state for Sample Location " "and sampleLocationEnable is true, the " "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationsPerPixel (%s) must " "be the same as the VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%s).", pipelineIndex, string_VkSampleCountFlagBits(sample_location_info.sampleLocationsPerPixel), string_VkSampleCountFlagBits(multisample_state->rasterizationSamples)); } } } } if (IsExtEnabled(device_extensions.vk_qcom_render_pass_shader_resolve)) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_input_attachment_samples = 0; for (uint32_t i = 0; i < subpass_desc->inputAttachmentCount; i++) { const auto attachment = subpass_desc->pInputAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { subpass_input_attachment_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } } if ((subpass_desc->flags & VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM) != 0) { if (raster_samples != subpass_input_attachment_samples) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizationSamples-04899", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: The subpass includes " "VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM " "but the input attachment VkSampleCountFlagBits (%u) does not match the " "VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%u) VkSampleCountFlagBits.", pipelineIndex, subpass_input_attachment_samples, multisample_state->rasterizationSamples); } if (multisample_state->sampleShadingEnable == VK_TRUE) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-sampleShadingEnable-04900", "vkCreateGraphicsPipelines() pCreateInfo[%" PRIu32 "]: The subpass includes " "VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM " "which requires sample shading is disabled, but " "VkPipelineMultisampleStateCreateInfo::sampleShadingEnable is true. 
", pipelineIndex); } } } } skip |= ValidatePipelineCacheControlFlags(create_info.flags, pipelineIndex, "vkCreateGraphicsPipelines", "VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878"); // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378 if (!enabled_features.extended_dynamic_state_features.extendedDynamicState && (IsDynamic(pPipeline, VK_DYNAMIC_STATE_CULL_MODE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRONT_FACE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_OP_EXT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Extended dynamic state used by the extendedDynamicState " "feature is not enabled", pipelineIndex); } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868 if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2 && (IsDynamic(pPipeline, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Extended dynamic state used by the extendedDynamicState2 " "feature is not enabled", pipelineIndex); } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869 if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp && IsDynamic(pPipeline, VK_DYNAMIC_STATE_LOGIC_OP_EXT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Extended dynamic state used by the " "extendedDynamicState2LogicOp feature is not enabled", pipelineIndex); } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870 if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints && IsDynamic(pPipeline, VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Extended dynamic state used by the " "extendedDynamicState2PatchControlPoints " "feature is not enabled", pipelineIndex); } const VkPipelineFragmentShadingRateStateCreateInfoKHR *fragment_shading_rate_state = LvlFindInChain<VkPipelineFragmentShadingRateStateCreateInfoKHR>(create_info.pNext); if (fragment_shading_rate_state && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR)) { const char *struct_name = "VkPipelineFragmentShadingRateStateCreateInfoKHR"; if (fragment_shading_rate_state->fragmentSize.width == 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04494", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Fragment width of %u has been specified in %s.", pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name); } 
if (fragment_shading_rate_state->fragmentSize.height == 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04495", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Fragment height of %u has been specified in %s.", pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name); } if (fragment_shading_rate_state->fragmentSize.width != 0 && !IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.width)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04496", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Non-power-of-two fragment width of %u has been specified in %s.", pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name); } if (fragment_shading_rate_state->fragmentSize.height != 0 && !IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.height)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04497", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Non-power-of-two fragment height of %u has been specified in %s.", pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name); } if (fragment_shading_rate_state->fragmentSize.width > 4) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04498", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Fragment width of %u specified in %s is too large.", pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name); } if (fragment_shading_rate_state->fragmentSize.height > 4) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04499", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Fragment height of %u specified in %s is too large", pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && fragment_shading_rate_state->fragmentSize.width != 1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Pipeline fragment width of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pipelineIndex, fragment_shading_rate_state->fragmentSize.width, struct_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && fragment_shading_rate_state->fragmentSize.height != 1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Pipeline fragment height of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pipelineIndex, fragment_shading_rate_state->fragmentSize.height, struct_name); } if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04501", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: First combiner operation of %s has been specified in %s, but " "primitiveFragmentShadingRate is not enabled", pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name); } if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(device, 
"VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04502", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Second combiner operation of %s has been specified in %s, but " "attachmentFragmentShadingRate is not enabled", pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: First combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is not supported", pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Second combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is not supported", pipelineIndex, string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name); } } const auto *discard_rectangle_state = LvlFindInChain<VkPipelineDiscardRectangleStateCreateInfoEXT>(create_info.pNext); if (discard_rectangle_state) { if (discard_rectangle_state->discardRectangleCount > phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles) { skip |= LogError( device, "VUID-VkPipelineDiscardRectangleStateCreateInfoEXT-discardRectangleCount-00582", "vkCreateGraphicsPipelines(): VkPipelineDiscardRectangleStateCreateInfoEXT::discardRectangleCount (%" PRIu32 ") in pNext chain of pCreateInfo[%" PRIu32 "] is not less than VkPhysicalDeviceDiscardRectanglePropertiesEXT::maxDiscardRectangles (%" PRIu32 ".", discard_rectangle_state->discardRectangleCount, pipelineIndex, phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles); } } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807 if (!enabled_features.vertex_input_dynamic_state_features.vertexInputDynamicState && IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: The vertexInputDynamicState feature must be enabled to use " "the VK_DYNAMIC_STATE_VERTEX_INPUT_EXT dynamic state", pipelineIndex); } if (!enabled_features.color_write_features.colorWriteEnable && IsDynamic(pPipeline, VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04800", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: The colorWriteEnable feature must be enabled to use the " "VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT dynamic state", pipelineIndex); } const auto rendering_struct = LvlFindInChain<VkPipelineRenderingCreateInfoKHR>(create_info.pNext); if (rendering_struct) { if ((rendering_struct->viewMask != 0) && 
            !enabled_features.multiview_features.multiviewTessellationShader &&
            (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ||
             pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06057",
                             "Pipeline %" PRIu32 " has VkPipelineRenderingCreateInfoKHR->viewMask(%" PRIu32
                             ") and "
                             "multiviewTessellationShader is not enabled, but the pipeline contains tessellation shaders",
                             pipelineIndex, rendering_struct->viewMask);
        }

        if ((rendering_struct->viewMask != 0) && !enabled_features.multiview_features.multiviewGeometryShader &&
            (pPipeline->active_shaders & VK_SHADER_STAGE_GEOMETRY_BIT)) {
            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06058",
                             "Pipeline %" PRIu32 " has VkPipelineRenderingCreateInfoKHR->viewMask(%" PRIu32
                             ") and "
                             "multiviewGeometryShader is not enabled, but the pipeline contains a geometry shader",
                             pipelineIndex, rendering_struct->viewMask);
        }

        if ((pPipeline->create_info.graphics.pColorBlendState != nullptr) &&
            (rendering_struct->colorAttachmentCount != pPipeline->create_info.graphics.pColorBlendState->attachmentCount)) {
            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06060",
                             "Pipeline %" PRIu32 " interface pColorBlendState->attachmentCount %" PRIu32
                             " and "
                             "VkPipelineRenderingCreateInfoKHR->colorAttachmentCount %" PRIu32 " must be equal",
                             pipelineIndex, rendering_struct->colorAttachmentCount,
                             pPipeline->create_info.graphics.pColorBlendState->attachmentCount);
        }

        for (uint32_t color_index = 0; color_index < rendering_struct->colorAttachmentCount; color_index++) {
            const VkFormat color_format = rendering_struct->pColorAttachmentFormats[color_index];
            if (color_format != VK_FORMAT_UNDEFINED) {
                VkFormatFeatureFlags2KHR format_features = GetPotentialFormatFeatures(color_format);
                // Guard against a null pColorBlendState before dereferencing its attachment array
                if (((format_features & VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR) == 0) &&
                    (pPipeline->create_info.graphics.pColorBlendState != nullptr) &&
                    (pPipeline->create_info.graphics.pColorBlendState->pAttachments[color_index].blendEnable != VK_FALSE)) {
                    skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06062",
                                     "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
                                     "]: pColorBlendState->blendEnable must be false.",
                                     pipelineIndex);
                }
                if ((format_features & VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR) == 0) {
                    skip |= LogError(device, "VUID-VkPipelineRenderingCreateInfoKHR-pColorAttachmentFormats-06064",
                                     "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
                                     "]: color_format (%s) must be a format with potential format features that include "
                                     "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT",
                                     pipelineIndex, string_VkFormat(color_format));
                }
            }
        }

        if (rendering_struct->depthAttachmentFormat != VK_FORMAT_UNDEFINED) {
            VkFormatFeatureFlags2KHR format_features = GetPotentialFormatFeatures(rendering_struct->depthAttachmentFormat);
            if ((format_features & VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR) == 0) {
                skip |= LogError(device, "VUID-VkPipelineRenderingCreateInfoKHR-depthAttachmentFormat-06065",
                                 "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32
                                 "]: depthAttachmentFormat (%s) must be a format with potential format features that include "
                                 "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT",
                                 pipelineIndex, string_VkFormat(rendering_struct->depthAttachmentFormat));
            }
        }

        if (rendering_struct->stencilAttachmentFormat != VK_FORMAT_UNDEFINED) {
            VkFormatFeatureFlags2KHR format_features = GetPotentialFormatFeatures(rendering_struct->stencilAttachmentFormat);
            if ((format_features & VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR) == 0) {
                skip |= LogError(device,
"VUID-VkPipelineRenderingCreateInfoKHR-stencilAttachmentFormat-06164", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: stencilAttachmentFormat (%s) must be a format with potential format features that include " "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT", pipelineIndex, string_VkFormat(rendering_struct->stencilAttachmentFormat)); } } if ((rendering_struct->depthAttachmentFormat != VK_FORMAT_UNDEFINED) && (rendering_struct->stencilAttachmentFormat != VK_FORMAT_UNDEFINED) && (rendering_struct->depthAttachmentFormat != rendering_struct->stencilAttachmentFormat)) { skip |= LogError(device, "VUID-VkPipelineRenderingCreateInfoKHR-depthAttachmentFormat-06165", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: depthAttachmentFormat is not VK_FORMAT_UNDEFINED and stencilAttachmentFormat is not " "VK_FORMAT_UNDEFINED, but depthAttachmentFormat (%s) does not equal stencilAttachmentFormat (%s)", pipelineIndex, string_VkFormat(rendering_struct->depthAttachmentFormat), string_VkFormat(rendering_struct->stencilAttachmentFormat)); } if ((enabled_features.core11.multiview == VK_FALSE) && (rendering_struct->viewMask != 0)) { skip |= LogError(device, "VUID-VkPipelineRenderingCreateInfoKHR-multiview-06066", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: multiview is not enabled but viewMask is (%u).", pipelineIndex, rendering_struct->viewMask); } if (MostSignificantBit(rendering_struct->viewMask) >= phys_dev_props_core11.maxMultiviewViewCount) { skip |= LogError(device, "VUID-VkPipelineRenderingCreateInfoKHR-viewMask-06067", "vkCreateGraphicsPipelines() pCreateInfos[%" PRIu32 "]: Most significant bit in " "VkPipelineRenderingCreateInfoKHR->viewMask(%u) must be less maxMultiviewViewCount(%u)", pipelineIndex, rendering_struct->viewMask, phys_dev_props_core11.maxMultiviewViewCount); } } return skip; } // Block of code at start here specifically for managing/tracking DSs // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer // func_str is the name of the calling function // Return false if no errors occur // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain) bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const { if (disabled[object_in_use]) return false; bool skip = false; auto set_node = Get<cvdescriptorset::DescriptorSet>(set); if (set_node) { // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here if (set_node->InUse()) { skip |= LogError(set, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309", "Cannot call %s() on %s that is in use by a command buffer.", func_str, report_data->FormatHandle(set).c_str()); } } return skip; } // If a renderpass is active, verify that the given command type is appropriate for current subpass state bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const { if (!pCB->activeRenderPass) return false; bool skip = false; if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY && pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS && cmd_type != CMD_NEXTSUBPASS2 && cmd_type != CMD_NEXTSUBPASS2KHR && cmd_type != CMD_ENDRENDERPASS2 && cmd_type != CMD_ENDRENDERPASS2KHR)) { skip |= LogError(pCB->commandBuffer(), kVUID_Core_DrawState_InvalidCommandBuffer, "%s() cannot be called in a subpass using secondary 
command buffers.", kGeneratedCommandNameList[cmd_type]); } return skip; } bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags, const char *error_code) const { auto pool = cb_node->command_pool; if (pool) { const uint32_t queue_family_index = pool->queueFamilyIndex; const VkQueueFlags queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags; if (!(required_flags & queue_flags)) { string required_flags_string; for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_SPARSE_BINDING_BIT, VK_QUEUE_PROTECTED_BIT}) { if (flag & required_flags) { if (required_flags_string.size()) { required_flags_string += " or "; } required_flags_string += string_VkQueueFlagBits(flag); } } return LogError(cb_node->commandBuffer(), error_code, "%s(): Called in command buffer %s which was allocated from the command pool %s which was created with " "queueFamilyIndex %u which doesn't contain the required %s capability flags.", caller_name, report_data->FormatHandle(cb_node->commandBuffer()).c_str(), report_data->FormatHandle(pool->commandPool()).c_str(), queue_family_index, required_flags_string.c_str()); } } return false; } bool CoreChecks::ValidateSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo, const char *apiName) const { bool skip = false; const VkSampleCountFlagBits sample_count = pSampleLocationsInfo->sampleLocationsPerPixel; const uint32_t sample_total_size = pSampleLocationsInfo->sampleLocationGridSize.width * pSampleLocationsInfo->sampleLocationGridSize.height * SampleCountSize(sample_count); if (pSampleLocationsInfo->sampleLocationsCount != sample_total_size) { skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527", "%s: VkSampleLocationsInfoEXT::sampleLocationsCount (%u) must equal grid width * grid height * pixel " "sample rate which currently is (%u * %u * %u).", apiName, pSampleLocationsInfo->sampleLocationsCount, pSampleLocationsInfo->sampleLocationGridSize.width, pSampleLocationsInfo->sampleLocationGridSize.height, SampleCountSize(sample_count)); } if ((phys_dev_ext_props.sample_locations_props.sampleLocationSampleCounts & sample_count) == 0) { skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsPerPixel-01526", "%s: VkSampleLocationsInfoEXT::sampleLocationsPerPixel of %s is not supported by the device, please check " "VkPhysicalDeviceSampleLocationsPropertiesEXT::sampleLocationSampleCounts for valid sample counts.", apiName, string_VkSampleCountFlagBits(sample_count)); } return skip; } bool CoreChecks::MatchSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo1, const VkSampleLocationsInfoEXT *pSampleLocationsInfo2) const { if (pSampleLocationsInfo1->sampleLocationsPerPixel != pSampleLocationsInfo2->sampleLocationsPerPixel || pSampleLocationsInfo1->sampleLocationGridSize.width != pSampleLocationsInfo2->sampleLocationGridSize.width || pSampleLocationsInfo1->sampleLocationGridSize.height != pSampleLocationsInfo2->sampleLocationGridSize.height || pSampleLocationsInfo1->sampleLocationsCount != pSampleLocationsInfo2->sampleLocationsCount) { return false; } for (uint32_t i = 0; i < pSampleLocationsInfo1->sampleLocationsCount; ++i) { if (pSampleLocationsInfo1->pSampleLocations[i].x != pSampleLocationsInfo2->pSampleLocations[i].x || pSampleLocationsInfo1->pSampleLocations[i].y != pSampleLocationsInfo2->pSampleLocations[i].y) { return false; } } return true; } static char 
const *GetCauseStr(VulkanTypedHandle obj) { if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated"; if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded"; return "destroyed"; } bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const { bool skip = false; for (const auto& entry: cb_state->broken_bindings) { const auto& obj = entry.first; const char *cause_str = GetCauseStr(obj); string vuid; std::ostringstream str; str << kVUID_Core_DrawState_InvalidCommandBuffer << "-" << object_string[obj.type]; vuid = str.str(); auto objlist = entry.second; //intentional copy objlist.add(cb_state->commandBuffer()); skip |= LogError(objlist, vuid, "You are adding %s to %s that is invalid because bound %s was %s.", call_source, report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(obj).c_str(), cause_str); } return skip; } bool CoreChecks::ValidateIndirectCmd(const CMD_BUFFER_STATE &cb_state, const BUFFER_STATE &buffer_state, CMD_TYPE cmd_type) const { bool skip = false; const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); const char *caller_name = CommandTypeString(cmd_type); skip |= ValidateMemoryIsBoundToBuffer(&buffer_state, caller_name, vuid.indirect_contiguous_memory); skip |= ValidateBufferUsageFlags(&buffer_state, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, true, vuid.indirect_buffer_bit, caller_name, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT"); if (cb_state.unprotected == false) { skip |= LogError(cb_state.Handle(), vuid.indirect_protected_cb, "%s: Indirect commands can't be used in protected command buffers.", caller_name); } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; uint32_t count = 1 << physical_device_count; if (count <= deviceMask) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask, physical_device_count); } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; if (deviceMask == 0) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask); } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; if ((deviceMask & pCB->initial_device_mask) != deviceMask) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").", deviceMask, report_data->FormatHandle(pCB->commandBuffer()).c_str(), pCB->initial_device_mask); } return skip; } bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const char *VUID) const { bool skip = false; if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) { skip |= LogError(pCB->commandBuffer(), VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").", deviceMask, report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str(), pCB->active_render_pass_device_mask); } return skip; } // Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a // render pass. 
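// For example (illustrative): transfer commands such as vkCmdCopyBuffer must be recorded outside a render pass,
// so their validation uses InsideRenderPass(), while draw commands such as vkCmdDraw must be recorded inside
// one and are checked with OutsideRenderPass() below.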
bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = LogError(pCB->commandBuffer(), msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName,
                          report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str());
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = LogError(pCB->commandBuffer(), msgCode, "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}

bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
                                          const char *err_code, const char *cmd_name, const char *queue_family_var_name) const {
    bool skip = false;

    if (requested_queue_family >= pd_state->queue_family_known_count) {
        const char *conditional_ext_cmd =
            instance_extensions.vk_khr_get_physical_device_properties2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : "";
        skip |= LogError(pd_state->Handle(), err_code,
                         "%s: %s (= %" PRIu32
                         ") is not less than any previously obtained pQueueFamilyPropertyCount from "
                         "vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).",
                         cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd,
                         std::to_string(pd_state->queue_family_known_count).c_str());
    }
    return skip;
}

// Verify VkDeviceQueueCreateInfos
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
                                                const VkDeviceQueueCreateInfo *infos) const {
    bool skip = false;

    const uint32_t not_used = std::numeric_limits<uint32_t>::max();
    struct create_flags {
        // uint32_t is to represent the queue family index to allow for better error messages
        uint32_t unprotected_index;
        uint32_t protected_index;
        create_flags(uint32_t a, uint32_t b) : unprotected_index(a), protected_index(b) {}
    };
    layer_data::unordered_map<uint32_t, create_flags> queue_family_map;

    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;
        const bool protected_create_bit = (infos[i].flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) != 0;

        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
                                         "vkCreateDevice", queue_family_var_name.c_str());

        if (api_version == VK_API_VERSION_1_0) {
            // Vulkan 1.0 didn't have protected memory so always needed unique info
            create_flags flags = {requested_queue_family, not_used};
            if (queue_family_map.emplace(requested_queue_family, flags).second == false) {
                skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
                                 "CreateDevice(): %s (=%" PRIu32
                                 ") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%" PRIu32 "].",
                                 queue_family_var_name.c_str(), requested_queue_family,
                                 queue_family_map.at(requested_queue_family).unprotected_index);
            }
        } else {
            // Vulkan 1.1 and up can have 2 queues with the same family index if one is protected and one isn't
            auto it = queue_family_map.find(requested_queue_family);
            if (it == queue_family_map.end()) {
                // Add first time seeing queue family index and what the create flags were
                create_flags new_flags = {not_used, not_used};
                if (protected_create_bit) {
                    new_flags.protected_index = requested_queue_family;
                } else {
                    new_flags.unprotected_index = requested_queue_family;
                }
                queue_family_map.emplace(requested_queue_family, new_flags);
            } else {
                // The queue family was seen, so now need to make sure the flags were different
                if (protected_create_bit) {
                    if (it->second.protected_index != not_used) {
                        skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-queueFamilyIndex-02802",
                                         "CreateDevice(): %s (=%" PRIu32
                                         ") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%" PRIu32
                                         "] which both have "
                                         "VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT.",
                                         queue_family_var_name.c_str(), requested_queue_family,
                                         queue_family_map.at(requested_queue_family).protected_index);
                    } else {
                        it->second.protected_index = requested_queue_family;
                    }
                } else {
                    if (it->second.unprotected_index != not_used) {
                        skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-queueFamilyIndex-02802",
                                         "CreateDevice(): %s (=%" PRIu32
                                         ") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%" PRIu32 "].",
                                         queue_family_var_name.c_str(), requested_queue_family,
                                         queue_family_map.at(requested_queue_family).unprotected_index);
                    } else {
                        it->second.unprotected_index = requested_queue_family;
                    }
                }
            }
        }

        const VkQueueFamilyProperties requested_queue_family_props = pd_state->queue_family_properties[requested_queue_family];

        // if using protected flag, make sure queue supports it
        if (protected_create_bit && ((requested_queue_family_props.queueFlags & VK_QUEUE_PROTECTED_BIT) == 0)) {
            skip |= LogError(
                pd_state->Handle(), "VUID-VkDeviceQueueCreateInfo-flags-06449",
                "CreateDevice(): %s (=%" PRIu32
                ") does not have VK_QUEUE_PROTECTED_BIT supported, but VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT is being used.",
                queue_family_var_name.c_str(), requested_queue_family);
        }

        // Verify that requested queue count of queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_known_count) {
            const auto requested_queue_count = infos[i].queueCount;
            const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size();
            // spec guarantees at least one queue for each queue family
            const uint32_t available_queue_count = queue_family_has_props ? requested_queue_family_props.queueCount : 1;
            const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties2
                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                                  : "";

            if (requested_queue_count > available_queue_count) {
                const std::string count_note =
                    queue_family_has_props
                        ? "i.e. is not less than or equal to " + std::to_string(requested_queue_family_props.queueCount)
                        : "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
                skip |= LogError(
                    pd_state->Handle(), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex (=%" PRIu32
                    ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const {
    bool skip = false;
    auto pd_state = Get<PHYSICAL_DEVICE_STATE>(gpu);

    // TODO: object_tracker should perhaps do this instead
    // and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {
        skip |= LogError(device, kVUID_Core_DevLimit_MustQueryCount,
                         "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else {
        skip |= ValidateDeviceQueueCreateInfos(pd_state.get(), pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);

        const VkPhysicalDeviceFragmentShadingRateFeaturesKHR *fragment_shading_rate_features =
            LvlFindInChain<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(pCreateInfo->pNext);

        if (fragment_shading_rate_features) {
            const VkPhysicalDeviceShadingRateImageFeaturesNV *shading_rate_image_features =
                LvlFindInChain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);

            if (shading_rate_image_features && shading_rate_image_features->shadingRateImage) {
                if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
                    skip |= LogError(
                        pd_state->Handle(), "VUID-VkDeviceCreateInfo-shadingRateImage-04478",
                        "vkCreateDevice: Cannot enable shadingRateImage and pipelineFragmentShadingRate features simultaneously.");
                }
                if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
                    skip |= LogError(
                        pd_state->Handle(), "VUID-VkDeviceCreateInfo-shadingRateImage-04479",
                        "vkCreateDevice: Cannot enable shadingRateImage and primitiveFragmentShadingRate features simultaneously.");
                }
                if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
                    skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-shadingRateImage-04480",
                                     "vkCreateDevice: Cannot enable shadingRateImage and attachmentFragmentShadingRate features "
                                     "simultaneously.");
                }
            }

            const VkPhysicalDeviceFragmentDensityMapFeaturesEXT *fragment_density_map_features =
                LvlFindInChain<VkPhysicalDeviceFragmentDensityMapFeaturesEXT>(pCreateInfo->pNext);

            if (fragment_density_map_features && fragment_density_map_features->fragmentDensityMap) {
                if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
                    skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-fragmentDensityMap-04481",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and pipelineFragmentShadingRate features "
                                     "simultaneously.");
                }
                if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
                    skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-fragmentDensityMap-04482",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and primitiveFragmentShadingRate features "
                                     "simultaneously.");
                }
                if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
                    skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-fragmentDensityMap-04483",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and attachmentFragmentShadingRate features "
                                     "simultaneously.");
                }
            }
        }

        const auto *shader_image_atomic_int64_features =
            LvlFindInChain<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT>(pCreateInfo->pNext);
        if (shader_image_atomic_int64_features) {
            if (shader_image_atomic_int64_features->sparseImageInt64Atomics &&
                !shader_image_atomic_int64_features->shaderImageInt64Atomics) {
                skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-None-04896",
                                 "vkCreateDevice: if sparseImageInt64Atomics feature is enabled then shaderImageInt64Atomics "
                                 "feature must also be enabled.");
            }
        }
        const auto *shader_atomic_float_features =
            LvlFindInChain<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT>(pCreateInfo->pNext);
        if (shader_atomic_float_features) {
            if (shader_atomic_float_features->sparseImageFloat32Atomics &&
                !shader_atomic_float_features->shaderImageFloat32Atomics) {
                skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-None-04897",
                                 "vkCreateDevice: if sparseImageFloat32Atomics feature is enabled then shaderImageFloat32Atomics "
                                 "feature must also be enabled.");
            }
            if (shader_atomic_float_features->sparseImageFloat32AtomicAdd &&
                !shader_atomic_float_features->shaderImageFloat32AtomicAdd) {
                skip |= LogError(pd_state->Handle(), "VUID-VkDeviceCreateInfo-None-04898",
                                 "vkCreateDevice: if sparseImageFloat32AtomicAdd feature is enabled then "
                                 "shaderImageFloat32AtomicAdd feature must also be enabled.");
            }
        }
        const auto *shader_atomic_float2_features =
            LvlFindInChain<VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT>(pCreateInfo->pNext);
        if (shader_atomic_float2_features) {
            if (shader_atomic_float2_features->sparseImageFloat32AtomicMinMax &&
                !shader_atomic_float2_features->shaderImageFloat32AtomicMinMax) {
                skip |= LogError(
                    pd_state->Handle(), "VUID-VkDeviceCreateInfo-sparseImageFloat32AtomicMinMax-04975",
                    "vkCreateDevice: if sparseImageFloat32AtomicMinMax feature is enabled then shaderImageFloat32AtomicMinMax "
                    "feature must also be enabled.");
            }
        }
        const auto *device_group_ci = LvlFindInChain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext);
        if (device_group_ci) {
            // i + 1 < count (rather than i < count - 1) avoids unsigned wrap-around when physicalDeviceCount is zero
            for (uint32_t i = 0; i + 1 < device_group_ci->physicalDeviceCount; ++i) {
                for (uint32_t j = i + 1; j < device_group_ci->physicalDeviceCount; ++j) {
                    if (device_group_ci->pPhysicalDevices[i] == device_group_ci->pPhysicalDevices[j]) {
                        skip |= LogError(pd_state->Handle(), "VUID-VkDeviceGroupDeviceCreateInfo-pPhysicalDevices-00375",
                                         "vkCreateDevice: VkDeviceGroupDeviceCreateInfo has a duplicated physical device "
                                         "in pPhysicalDevices [%" PRIu32 "] and [%" PRIu32 "].",
                                         i, j);
                    }
                }
            }
        }
    }
    return skip;
}

void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    // The state tracker sets up the device state
    StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);

    // Add the callback hooks for the functions that are either broadly or deeply used and that the
    // ValidationStateTracker refactor would be messier without.
    // TODO: Find a good way to do this hooklessly.
    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
    CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
    core_checks->SetSetImageViewInitialLayoutCallback(
        [](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void {
            cb_node->SetImageViewInitialLayout(iv_state, layout);
        });

    // Allocate shader validation cache
    if (!disabled[shader_validation_caching] && !disabled[shader_validation] && !core_checks->core_validation_cache) {
        auto tmp_path = GetEnvironment("TMPDIR");
        if (!tmp_path.size()) tmp_path = GetEnvironment("TMP");
        if (!tmp_path.size()) tmp_path = GetEnvironment("TEMP");
        if (!tmp_path.size()) tmp_path = "/tmp";
        core_checks->validation_cache_path = tmp_path + "/shader_validation_cache";
#if defined(__linux__) || defined(__FreeBSD__)
        core_checks->validation_cache_path += "-" + std::to_string(getuid());
#endif
        core_checks->validation_cache_path += ".bin";

        std::vector<char> validation_cache_data;
        std::ifstream read_file(core_checks->validation_cache_path.c_str(), std::ios::in | std::ios::binary);

        if (read_file) {
            std::copy(std::istreambuf_iterator<char>(read_file), {}, std::back_inserter(validation_cache_data));
            read_file.close();
        } else {
            LogInfo(core_checks->device, "VUID-NONE",
                    "Cannot open shader validation cache at %s for reading (it may not exist yet)",
                    core_checks->validation_cache_path.c_str());
        }

        VkValidationCacheCreateInfoEXT cacheCreateInfo = {};
        cacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
        cacheCreateInfo.pNext = NULL;
        cacheCreateInfo.initialDataSize = validation_cache_data.size();
        cacheCreateInfo.pInitialData = validation_cache_data.data();
        cacheCreateInfo.flags = 0;
        CoreLayerCreateValidationCacheEXT(*pDevice, &cacheCreateInfo, nullptr, &core_checks->core_validation_cache);
    }
}

void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    if (!device) return;
    imageLayoutMap.clear();

    StateTracker::PreCallRecordDestroyDevice(device, pAllocator);

    if (core_validation_cache) {
        size_t validation_cache_size = 0;
        void *validation_cache_data = nullptr;

        CoreLayerGetValidationCacheDataEXT(device, core_validation_cache, &validation_cache_size, nullptr);

        validation_cache_data = (char *)malloc(sizeof(char) * validation_cache_size);
        if (!validation_cache_data) {
            LogInfo(device, "VUID-NONE", "Validation Cache Memory Error");
            return;
        }

        VkResult result =
            CoreLayerGetValidationCacheDataEXT(device, core_validation_cache, &validation_cache_size, validation_cache_data);

        if (result != VK_SUCCESS) {
            LogInfo(device, "VUID-NONE", "Validation Cache Retrieval Error");
            free(validation_cache_data);
            return;
        }

        FILE *write_file = fopen(validation_cache_path.c_str(), "wb");
        if (write_file) {
            fwrite(validation_cache_data, sizeof(char), validation_cache_size, write_file);
            fclose(write_file);
        } else {
            LogInfo(device, "VUID-NONE", "Cannot open shader validation cache at %s for writing", validation_cache_path.c_str());
        }
        free(validation_cache_data);
        CoreLayerDestroyValidationCacheEXT(device, core_validation_cache, NULL);
    }
}

bool CoreChecks::ValidateStageMaskHost(const Location &loc, VkPipelineStageFlags2KHR stageMask) const {
    bool skip = false;
    if ((stageMask & VK_PIPELINE_STAGE_HOST_BIT) != 0) {
        const auto &vuid =
            sync_vuid_maps::GetQueueSubmitVUID(loc, sync_vuid_maps::SubmitError::kHostStageMask);
        skip |= LogError(
            device, vuid,
            "%s stage mask must not include VK_PIPELINE_STAGE_HOST_BIT as the stage can't be invoked inside a command buffer.",
            loc.Message().c_str());
    }
    return skip;
}

bool CoreChecks::ValidateCommandBufferSimultaneousUse(const Location &loc, const CMD_BUFFER_STATE *pCB,
                                                      int current_submit_count) const {
    using sync_vuid_maps::GetQueueSubmitVUID;
    using sync_vuid_maps::SubmitError;

    bool skip = false;
    if ((pCB->InUse() || current_submit_count > 1) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kCmdNotSimultaneous);
        skip |= LogError(device, vuid, "%s %s is already in use and is not marked for simultaneous use.", loc.Message().c_str(),
                         report_data->FormatHandle(pCB->commandBuffer()).c_str());
    }
    return skip;
}

bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count,
                                            const char *vu_id) const {
    bool skip = false;
    if (disabled[command_buffer_state]) return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
        (cb_state->submitCount + current_submit_count > 1)) {
        skip |= LogError(cb_state->commandBuffer(), kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
                         "%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
                         " times.",
                         report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
                         cb_state->submitCount + current_submit_count);
    }

    // Validate that cmd buffers have been updated
    switch (cb_state->state) {
        case CB_INVALID_INCOMPLETE:
        case CB_INVALID_COMPLETE:
            skip |= ReportInvalidCommandBuffer(cb_state, call_source);
            break;

        case CB_NEW:
            skip |= LogError(cb_state->commandBuffer(), vu_id,
                             "%s used in the call to %s is unrecorded and contains no commands.",
                             report_data->FormatHandle(cb_state->commandBuffer()).c_str(), call_source);
            break;

        case CB_RECORDING:
            skip |= LogError(cb_state->commandBuffer(), kVUID_Core_DrawState_NoEndCommandBuffer,
                             "You must call vkEndCommandBuffer() on %s before this call to %s!",
                             report_data->FormatHandle(cb_state->commandBuffer()).c_str(), call_source);
            break;

        default: /* recorded */
            break;
    }
    return skip;
}

// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object,
                                       uint32_t queueFamilyIndex, uint32_t count, const uint32_t *indices) const {
    bool found = false;
    bool skip = false;
    for (uint32_t i = 0; i < count; i++) {
        if (indices[i] == queueFamilyIndex) {
            found = true;
            break;
        }
    }

    if (!found) {
        LogObjectList objlist(cb_node->commandBuffer());
        objlist.add(object);
        skip = LogError(objlist, "VUID-vkQueueSubmit-pSubmits-04626",
                        "vkQueueSubmit: %s contains %s which was not created allowing concurrent access to "
                        "this queue family %d.",
                        report_data->FormatHandle(cb_node->commandBuffer()).c_str(), report_data->FormatHandle(object).c_str(),
                        queueFamilyIndex);
    }
    return skip;
}

// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
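// Note: a command buffer records against the queue family of the pool it was allocated from, so submitting it
// on a queue from a different family is an error. For resources created with VK_SHARING_MODE_CONCURRENT, the
// submitting queue's family must additionally appear in the resource's pQueueFamilyIndices list, which
// ValidImageBufferQueue above verifies.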
bool CoreChecks::ValidateQueueFamilyIndices(const Location &loc, const CMD_BUFFER_STATE *pCB, VkQueue queue) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; auto pool = pCB->command_pool; auto queue_state = Get<QUEUE_STATE>(queue); if (pool && queue_state) { if (pool->queueFamilyIndex != queue_state->queueFamilyIndex) { LogObjectList objlist(pCB->commandBuffer()); objlist.add(queue); const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kCmdWrongQueueFamily); skip |= LogError(objlist, vuid, "%s Primary %s created in queue family %d is being submitted on %s " "from queue family %d.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str(), pool->queueFamilyIndex, report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex); } // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family for (const auto *base_node : pCB->object_bindings) { switch (base_node->Type()) { case kVulkanObjectTypeImage: { auto image_state = static_cast<const IMAGE_STATE *>(base_node); if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, image_state->Handle(), queue_state->queueFamilyIndex, image_state->createInfo.queueFamilyIndexCount, image_state->createInfo.pQueueFamilyIndices); } break; } case kVulkanObjectTypeBuffer: { auto buffer_state = static_cast<const BUFFER_STATE *>(base_node); if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, buffer_state->Handle(), queue_state->queueFamilyIndex, buffer_state->createInfo.queueFamilyIndexCount, buffer_state->createInfo.pQueueFamilyIndices); } break; } default: break; } } } return skip; } bool CoreChecks::ValidatePrimaryCommandBufferState( const Location &loc, const CMD_BUFFER_STATE *pCB, int current_submit_count, QFOTransferCBScoreboards<QFOImageTransferBarrier> *qfo_image_scoreboards, QFOTransferCBScoreboards<QFOBufferTransferBarrier> *qfo_buffer_scoreboards) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; // Track in-use for resources off of primary and any secondary CBs bool skip = false; if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) { const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdInSubmit); skip |= LogError(pCB->commandBuffer(), vuid, "%s Command buffer %s must be allocated with VK_COMMAND_BUFFER_LEVEL_PRIMARY.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str()); } else { for (const auto *sub_cb : pCB->linkedCommandBuffers) { skip |= ValidateQueuedQFOTransfers(sub_cb, qfo_image_scoreboards, qfo_buffer_scoreboards); // TODO: replace with InvalidateCommandBuffers() at recording. 
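                // A secondary command buffer recorded without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT can only
                // belong to the primary it was most recently recorded into; the check below flags submissions where
                // it has since been bound to a different primary.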
            if ((sub_cb->primaryCommandBuffer != pCB->commandBuffer()) &&
                !(sub_cb->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                LogObjectList objlist(device);
                objlist.add(pCB->commandBuffer());
                objlist.add(sub_cb->commandBuffer());
                objlist.add(sub_cb->primaryCommandBuffer);
                const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdNotSimultaneous);
                skip |= LogError(objlist, vuid,
                                 "%s %s was submitted with secondary %s but that buffer has subsequently been bound to "
                                 "primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                                 loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str(),
                                 report_data->FormatHandle(sub_cb->commandBuffer()).c_str(),
                                 report_data->FormatHandle(sub_cb->primaryCommandBuffer).c_str());
            }
        }
    }

    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
    skip |= ValidateCommandBufferSimultaneousUse(loc, pCB, current_submit_count);

    skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);

    const char *vuid = loc.function == Func::vkQueueSubmit ? "VUID-vkQueueSubmit-pCommandBuffers-00072"
                                                           : "VUID-vkQueueSubmit2KHR-commandBuffer-03876";
    skip |= ValidateCommandBufferState(pCB, loc.StringFunc().c_str(), current_submit_count, vuid);
    return skip;
}

bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *fence_state, const char *inflight_vuid, const char *retired_vuid,
                                        const char *func_name) const {
    bool skip = false;
    if (fence_state && fence_state->Scope() == kSyncScopeInternal) {
        switch (fence_state->State()) {
            case FENCE_INFLIGHT:
                skip |= LogError(fence_state->fence(), inflight_vuid, "%s: %s is already in use by another submission.", func_name,
                                 report_data->FormatHandle(fence_state->fence()).c_str());
                break;
            case FENCE_RETIRED:
                skip |= LogError(fence_state->fence(), retired_vuid,
                                 "%s: %s submitted in SIGNALED state. Fences must be reset before being submitted", func_name,
                                 report_data->FormatHandle(fence_state->fence()).c_str());
                break;
            default:
                break;
        }
    }
    return skip;
}

void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
                                           VkResult result) {
    StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);

    if (result != VK_SUCCESS) return;
    // The triply nested loop below duplicates what the StateTracker does, but avoids the need for two additional callbacks.
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = Get<CMD_BUFFER_STATE>(submit->pCommandBuffers[i]);
            if (cb_node) {
                for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                    UpdateCmdBufImageLayouts(secondary_cmd_buffer);
                    RecordQueuedQFOTransfers(secondary_cmd_buffer);
                }
                UpdateCmdBufImageLayouts(cb_node.get());
                RecordQueuedQFOTransfers(cb_node.get());
            }
        }
    }
}

void CoreChecks::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits, VkFence fence,
                                               VkResult result) {
    StateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result);

    if (result != VK_SUCCESS) return;
    // The triply nested loop below duplicates what the StateTracker does, but avoids the need for two additional callbacks.
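    // Bring the image layout map and queue family ownership (QFO) transfer bookkeeping up to date for the primary
    // and for every secondary it executes, mirroring PostCallRecordQueueSubmit above.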
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
            auto cb_node = Get<CMD_BUFFER_STATE>(submit->pCommandBufferInfos[i].commandBuffer);
            if (cb_node) {
                for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                    UpdateCmdBufImageLayouts(secondary_cmd_buffer);
                    RecordQueuedQFOTransfers(secondary_cmd_buffer);
                }
                UpdateCmdBufImageLayouts(cb_node.get());
                RecordQueuedQFOTransfers(cb_node.get());
            }
        }
    }
}

struct SemaphoreSubmitState {
    const CoreChecks *core;
    VkQueueFlags queue_flags;
    layer_data::unordered_set<VkSemaphore> signaled_semaphores;
    layer_data::unordered_set<VkSemaphore> unsignaled_semaphores;
    layer_data::unordered_set<VkSemaphore> internal_semaphores;

    SemaphoreSubmitState(const CoreChecks *core_, VkQueueFlags queue_flags_) : core(core_), queue_flags(queue_flags_) {}

    bool CannotWait(const SEMAPHORE_STATE &semaphore_state) const {
        auto semaphore = semaphore_state.semaphore();
        return unsignaled_semaphores.count(semaphore) || (!signaled_semaphores.count(semaphore) && !semaphore_state.CanBeWaited());
    }

    bool ValidateBinaryWait(const core_error::Location &loc, VkQueue queue, const SEMAPHORE_STATE &semaphore_state) {
        bool skip = false;
        using sync_vuid_maps::GetQueueSubmitVUID;
        using sync_vuid_maps::SubmitError;
        auto semaphore = semaphore_state.semaphore();
        if ((semaphore_state.Scope() == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
            if (CannotWait(semaphore_state)) {
                auto last_op = semaphore_state.LastOp();
                if (last_op) {
                    if (last_op->IsWait()) {
                        auto other_queue = last_op->queue->Handle();
                        const char *vuid = loc.function == core_error::Func::vkQueueSubmit
                                               ? "VUID-vkQueueSubmit-pWaitSemaphores-00068"
                                               : "VUID-vkQueueSubmit2KHR-semaphore-03871";
                        LogObjectList objlist(semaphore);
                        objlist.add(queue);
                        objlist.add(other_queue);
                        skip |= core->LogError(objlist, vuid, "%s Queue %s is already waiting on semaphore (%s).",
                                               loc.Message().c_str(), core->report_data->FormatHandle(other_queue).c_str(),
                                               core->report_data->FormatHandle(semaphore).c_str());
                    }
                } else {
                    auto error = IsExtEnabled(core->device_extensions.vk_khr_timeline_semaphore)
                                     ? SubmitError::kTimelineCannotBeSignalled
                                     : SubmitError::kBinaryCannotBeSignalled;
                    const auto &vuid = GetQueueSubmitVUID(loc, error);
                    LogObjectList objlist(semaphore);
                    objlist.add(queue);
                    skip |= core->LogError(
                        objlist, semaphore_state.Scope() == kSyncScopeInternal ? vuid : kVUID_Core_DrawState_QueueForwardProgress,
                        "%s Queue %s is waiting on semaphore (%s) that has no way to be signaled.", loc.Message().c_str(),
                        core->report_data->FormatHandle(queue).c_str(), core->report_data->FormatHandle(semaphore).c_str());
                }
            } else {
                signaled_semaphores.erase(semaphore);
                unsignaled_semaphores.insert(semaphore);
            }
        } else if (semaphore_state.Scope() == kSyncScopeExternalTemporary) {
            internal_semaphores.insert(semaphore);
        }
        return skip;
    }

    bool ValidateWaitSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
                               uint32_t device_index) {
        bool skip = false;
        const auto semaphore_state = core->Get<SEMAPHORE_STATE>(semaphore);
        if (!semaphore_state) {
            return skip;
        }
        switch (semaphore_state->type) {
            case VK_SEMAPHORE_TYPE_BINARY:
                skip = ValidateBinaryWait(loc, queue, *semaphore_state);
                break;
            case VK_SEMAPHORE_TYPE_TIMELINE:
                skip |= core->ValidateMaxTimelineSemaphoreValueDifference(loc, *semaphore_state, value);
                break;
            default:
                break;
        }
        return skip;
    }

    bool CannotSignal(const SEMAPHORE_STATE &semaphore_state) const {
        const auto semaphore = semaphore_state.semaphore();
        return signaled_semaphores.count(semaphore) || (!unsignaled_semaphores.count(semaphore) && !semaphore_state.CanBeSignaled());
    }

    bool ValidateSignalSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value,
                                 uint32_t device_index) {
        using sync_vuid_maps::GetQueueSubmitVUID;
        using sync_vuid_maps::SubmitError;
        bool skip = false;
        LogObjectList objlist(semaphore);
        objlist.add(queue);

        const auto semaphore_state = core->Get<SEMAPHORE_STATE>(semaphore);
        if (!semaphore_state) {
            return skip;
        }
        switch (semaphore_state->type) {
            case VK_SEMAPHORE_TYPE_BINARY: {
                if ((semaphore_state->Scope() == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                    if (CannotSignal(*semaphore_state)) {
                        VkQueue other_queue;
                        const auto last_op = semaphore_state->LastOp();
                        if (last_op) {
                            other_queue = last_op->queue ? last_op->queue->Queue() : VK_NULL_HANDLE;
                        } else {
                            const auto completed = semaphore_state->Completed();
                            other_queue = completed.queue ? completed.queue->Queue() : VK_NULL_HANDLE;
                        }
                        objlist.add(other_queue);
                        skip |= core->LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
                                               "%s is signaling %s (%s) that was previously "
                                               "signaled by %s but has not since been waited on by any queue.",
                                               loc.Message().c_str(), core->report_data->FormatHandle(queue).c_str(),
                                               core->report_data->FormatHandle(semaphore).c_str(),
                                               core->report_data->FormatHandle(other_queue).c_str());
                    } else {
                        unsignaled_semaphores.erase(semaphore);
                        signaled_semaphores.insert(semaphore);
                    }
                }
                break;
            }
            case VK_SEMAPHORE_TYPE_TIMELINE: {
                const auto completed = semaphore_state->Completed();
                if (value <= completed.payload) {
                    const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemSmallValue);
                    skip |= core->LogError(objlist, vuid,
                                           "%s signal value (0x%" PRIx64
                                           ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64 ")",
                                           loc.Message().c_str(), value, core->report_data->FormatHandle(queue).c_str(),
                                           core->report_data->FormatHandle(semaphore).c_str(), completed.payload);
                } else {
                    skip |= core->ValidateMaxTimelineSemaphoreValueDifference(loc, *semaphore_state, value);
                }
                break;
            }
            default:
                break;
        }
        return skip;
    }
};

bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo *submit,
                                             const Location &outer_loc) const {
    bool skip = false;
    auto *timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
    for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
        uint64_t value = 0;
        uint32_t device_index = 0;  // TODO:
        VkSemaphore semaphore = submit->pWaitSemaphores[i];

        LogObjectList objlist(semaphore);
        objlist.add(queue);
        if (submit->pWaitDstStageMask) {
            auto loc = outer_loc.dot(Field::pWaitDstStageMask, i);
            skip |= ValidatePipelineStage(objlist, loc, state.queue_flags, submit->pWaitDstStageMask[i]);
            skip |= ValidateStageMaskHost(loc, submit->pWaitDstStageMask[i]);
        }
        const auto semaphore_state = Get<SEMAPHORE_STATE>(semaphore);
        if (!semaphore_state) {
            continue;
        }
        auto loc = outer_loc.dot(Field::pWaitSemaphores, i);
        if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
            if (timeline_semaphore_submit_info == nullptr) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
                                 "%s (%s) is a timeline semaphore, but VkSubmitInfo does "
                                 "not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
                continue;
            } else if (submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03240",
                                 "%s (%s) is a timeline semaphore, it contains an "
                                 "instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different than "
                                 "waitSemaphoreCount (%u)",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
                                 timeline_semaphore_submit_info->waitSemaphoreValueCount, submit->waitSemaphoreCount);
                continue;
            }
            value = timeline_semaphore_submit_info->pWaitSemaphoreValues[i];
        }
        skip |= state.ValidateWaitSemaphore(outer_loc.dot(Field::pWaitSemaphores, i), queue, semaphore, value, device_index);
    }
    for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
        VkSemaphore semaphore = submit->pSignalSemaphores[i];
        uint64_t value = 0;
        uint32_t device_index = 0;
        const auto semaphore_state = Get<SEMAPHORE_STATE>(semaphore);
        if (!semaphore_state) {
            continue;
        }
        auto loc = outer_loc.dot(Field::pSignalSemaphores, i);
        if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) {
            if (timeline_semaphore_submit_info == nullptr) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
                                 "%s (%s) is a timeline semaphore, but VkSubmitInfo "
                                 "does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str());
                continue;
            } else if (submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
                skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03241",
                                 "%s (%s) is a timeline semaphore, it contains an "
                                 "instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different than "
                                 "signalSemaphoreCount (%u)",
                                 loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(),
                                 timeline_semaphore_submit_info->signalSemaphoreValueCount, submit->signalSemaphoreCount);
                continue;
            }
            value = timeline_semaphore_submit_info->pSignalSemaphoreValues[i];
        }
        skip |= state.ValidateSignalSemaphore(loc, queue, semaphore, value, device_index);
    }
    return skip;
}

bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo2KHR *submit,
                                             const Location &outer_loc) const {
    bool skip = false;
    for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) {
        const auto &sem_info = submit->pWaitSemaphoreInfos[i];
        Location loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i);
        skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
                                      sem_info.stageMask);
        skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
        skip |= state.ValidateWaitSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
    }
    for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) {
        const auto &sem_info = submit->pSignalSemaphoreInfos[i];
        auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i);
        skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags,
                                      sem_info.stageMask);
        skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask);
        skip |= state.ValidateSignalSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex);
    }
    return skip;
}

static uint64_t TimelineDiff(uint64_t a, uint64_t b) { return a > b ? a - b : b - a; }

bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(const Location &loc, const SEMAPHORE_STATE &semaphore_state,
                                                             uint64_t value) const {
    using sync_vuid_maps::GetQueueSubmitVUID;
    using sync_vuid_maps::SubmitError;
    bool skip = false;
    if (semaphore_state.type != VK_SEMAPHORE_TYPE_TIMELINE) return false;

    const auto semaphore = semaphore_state.Handle();
    const auto completed = semaphore_state.Completed();
    uint64_t diff = TimelineDiff(value, completed.payload);
    if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
        const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
        skip |= LogError(semaphore, vuid,
                         "%s value (%" PRIu64 ") exceeds limit regarding current semaphore %s payload (%" PRIu64 ").",
                         loc.Message().c_str(), value, report_data->FormatHandle(semaphore).c_str(), completed.payload);
    } else if (semaphore_state.HasPendingOps()) {
        const auto last_op = semaphore_state.LastOp();
        if (last_op && last_op->op_type != SEMAPHORE_STATE::kNone) {
            diff = TimelineDiff(value, last_op->payload);
            if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
                const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff);
                skip |= LogError(semaphore, vuid,
                                 "%s value (%" PRIu64 ") exceeds limit regarding pending semaphore %s %s value (%" PRIu64 ").",
                                 loc.Message().c_str(), value, report_data->FormatHandle(semaphore).c_str(),
                                 SEMAPHORE_STATE::OpTypeName(last_op->op_type), last_op->payload);
            }
        }
    }
    return skip;
}

struct CommandBufferSubmitState {
    const CoreChecks *core;
    const QUEUE_STATE *queue_state;
    QFOTransferCBScoreboards<QFOImageTransferBarrier> qfo_image_scoreboards;
    QFOTransferCBScoreboards<QFOBufferTransferBarrier> qfo_buffer_scoreboards;
    vector<VkCommandBuffer> current_cmds;
    GlobalImageLayoutMap overlay_image_layout_map;
    QueryMap local_query_to_state_map;
    EventToStageMap local_event_to_stage_map;

    CommandBufferSubmitState(const CoreChecks *c, const char *func, const QUEUE_STATE *q) : core(c), queue_state(q) {}

    bool Validate(const core_error::Location &loc, const CMD_BUFFER_STATE &cb_node, uint32_t perf_pass) {
        bool skip = false;
        skip |= core->ValidateCmdBufImageLayouts(loc, &cb_node, core->imageLayoutMap, overlay_image_layout_map);
        auto cmd = cb_node.commandBuffer();
        current_cmds.push_back(cmd);
        skip |= core->ValidatePrimaryCommandBufferState(
            loc, &cb_node, static_cast<int>(std::count(current_cmds.begin(), current_cmds.end(), cmd)), &qfo_image_scoreboards,
            &qfo_buffer_scoreboards);
        skip |= core->ValidateQueueFamilyIndices(loc, &cb_node, queue_state->Queue());

        for (const auto &descriptor_set : cb_node.validate_descriptorsets_in_queuesubmit) {
            const auto set_node = core->Get<cvdescriptorset::DescriptorSet>(descriptor_set.first);
            if (!set_node) {
                continue;
            }
            for (const auto &cmd_info : descriptor_set.second) {
                std::string function = loc.StringFunc();
                function += ", ";
                function += CommandTypeString(cmd_info.cmd_type);
                for (const auto &binding_info : cmd_info.binding_infos) {
                    std::string error;
                    std::vector<uint32_t> dynamic_offsets;
                    // dynamic data isn't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty.
                    // This is submit time, not record time...
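                    // checked_layouts caches ImageView/layout pairs that have already been validated during this
                    // submit; it is only worth allocating for very large descriptor sets (see the kManyDescriptors_
                    // threshold below).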
const bool record_time_validate = false; layer_data::optional<layer_data::unordered_map<VkImageView, VkImageLayout>> checked_layouts; if (set_node->GetTotalDescriptorCount() > cvdescriptorset::PrefilterBindRequestMap::kManyDescriptors_) { checked_layouts.emplace(); } skip |= core->ValidateDescriptorSetBindingData(&cb_node, set_node.get(), dynamic_offsets, binding_info, cmd_info.framebuffer, cmd_info.attachments.get(), cmd_info.subpasses.get(), record_time_validate, function.c_str(), core->GetDrawDispatchVuid(cmd_info.cmd_type), checked_layouts); } } } // Potential early exit here as bad object state may crash in delayed function calls if (skip) { return true; } // Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time) for (auto &function : cb_node.queue_submit_functions) { skip |= function(*core, *queue_state, cb_node); } for (auto &function : cb_node.eventUpdates) { skip |= function(core, /*do_validate*/ true, &local_event_to_stage_map); } VkQueryPool first_perf_query_pool = VK_NULL_HANDLE; for (auto &function : cb_node.queryUpdates) { skip |= function(core, /*do_validate*/ true, first_perf_query_pool, perf_pass, &local_query_to_state_map); } return skip; } }; bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) const { const auto fence_state = Get<FENCE_STATE>(fence); bool skip = ValidateFenceForSubmit(fence_state.get(), "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueSubmit-fence-00063", "vkQueueSubmit()"); if (skip) { return true; } const auto queue_state = Get<QUEUE_STATE>(queue); CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit()", queue_state.get()); SemaphoreSubmitState sem_submit_state(this, physical_device_state->queue_family_properties[queue_state->queueFamilyIndex].queueFlags); // Now verify each individual submit for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo *submit = &pSubmits[submit_idx]; const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext); uint32_t perf_pass = perf_submit ? 
perf_submit->counterPassIndex : 0;
        Location loc(Func::vkQueueSubmit, Struct::VkSubmitInfo, Field::pSubmits, submit_idx);
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            const auto cb_state = Get<CMD_BUFFER_STATE>(submit->pCommandBuffers[i]);
            skip |= cb_submit_state.Validate(loc.dot(Field::pCommandBuffers, i), *cb_state, perf_pass);
        }
        skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);

        auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupSubmitInfo>(submit->pNext);
        if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
            for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
                skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], queue,
                                                                "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
            }
            if (chained_device_group_struct->signalSemaphoreCount != submit->signalSemaphoreCount) {
                skip |= LogError(queue, "VUID-VkDeviceGroupSubmitInfo-signalSemaphoreCount-00084",
                                 "pSubmits[%" PRIu32 "] signalSemaphoreCount (%" PRIu32
                                 ") is different than signalSemaphoreCount (%" PRIu32
                                 ") of the VkDeviceGroupSubmitInfo in its pNext chain",
                                 submit_idx, submit->signalSemaphoreCount, chained_device_group_struct->signalSemaphoreCount);
            }
            if (chained_device_group_struct->waitSemaphoreCount != submit->waitSemaphoreCount) {
                skip |= LogError(queue, "VUID-VkDeviceGroupSubmitInfo-waitSemaphoreCount-00082",
                                 "pSubmits[%" PRIu32 "] waitSemaphoreCount (%" PRIu32
                                 ") is different than waitSemaphoreCount (%" PRIu32
                                 ") of the VkDeviceGroupSubmitInfo in its pNext chain",
                                 submit_idx, submit->waitSemaphoreCount, chained_device_group_struct->waitSemaphoreCount);
            }
            if (chained_device_group_struct->commandBufferCount != submit->commandBufferCount) {
                skip |= LogError(queue, "VUID-VkDeviceGroupSubmitInfo-commandBufferCount-00083",
                                 "pSubmits[%" PRIu32 "] commandBufferCount (%" PRIu32
                                 ") is different than commandBufferCount (%" PRIu32
                                 ") of the VkDeviceGroupSubmitInfo in its pNext chain",
                                 submit_idx, submit->commandBufferCount, chained_device_group_struct->commandBufferCount);
            }
        }

        auto protected_submit_info = LvlFindInChain<VkProtectedSubmitInfo>(submit->pNext);
        if (protected_submit_info) {
            const bool protected_submit = protected_submit_info->protectedSubmit == VK_TRUE;
            if ((protected_submit == true) && ((queue_state->flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) == 0)) {
                skip |= LogError(queue, "VUID-vkQueueSubmit-queue-06448",
                                 "vkQueueSubmit(): pSubmits[%u] contains a protected submission to %s which was not created with "
                                 "VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT",
                                 submit_idx, report_data->FormatHandle(queue).c_str());
            }

            // Make sure command buffers are all protected or unprotected
            for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
                const auto cb_state = Get<CMD_BUFFER_STATE>(submit->pCommandBuffers[i]);
                if (cb_state) {
                    if ((cb_state->unprotected == true) && (protected_submit == true)) {
                        LogObjectList objlist(cb_state->commandBuffer());
                        objlist.add(queue);
                        skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04148",
                                         "vkQueueSubmit(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
                                         "VkProtectedSubmitInfo::protectedSubmit set to VK_TRUE",
                                         report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
                                         report_data->FormatHandle(queue).c_str(), submit_idx);
                    }
                    if ((cb_state->unprotected == false) && (protected_submit == false)) {
                        LogObjectList objlist(cb_state->commandBuffer());
                        objlist.add(queue);
                        skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04120",
                                         "vkQueueSubmit(): command buffer %s is protected while queue %s pSubmits[%u] has "
                                         "VkProtectedSubmitInfo::protectedSubmit set to VK_FALSE",
                                         report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
                                         report_data->FormatHandle(queue).c_str(), submit_idx);
                    }
                }
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
                                                VkFence fence) const {
    const auto pFence = Get<FENCE_STATE>(fence);
    bool skip = ValidateFenceForSubmit(pFence.get(), "VUID-vkQueueSubmit2KHR-fence-04895", "VUID-vkQueueSubmit2KHR-fence-04894",
                                       "vkQueueSubmit2KHR()");
    if (skip) {
        return true;
    }

    if (!enabled_features.synchronization2_features.synchronization2) {
        skip |= LogError(queue, "VUID-vkQueueSubmit2KHR-synchronization2-03866",
                         "vkQueueSubmit2KHR(): Synchronization2 feature is not enabled");
    }

    const auto queue_state = Get<QUEUE_STATE>(queue);
    CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit2KHR()", queue_state.get());
    SemaphoreSubmitState sem_submit_state(this,
                                          physical_device_state->queue_family_properties[queue_state->queueFamilyIndex].queueFlags);

    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx];
        const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
        uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;
        Location loc(Func::vkQueueSubmit2KHR, Struct::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);

        skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc);

        bool protected_submit = (submit->flags & VK_SUBMIT_PROTECTED_BIT_KHR) != 0;
        if ((protected_submit == true) && ((queue_state->flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) == 0)) {
            skip |= LogError(queue, "VUID-vkQueueSubmit2KHR-queue-06447",
                             "vkQueueSubmit2KHR(): pSubmits[%u] contains a protected submission to %s which was not created with "
                             "VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT",
                             submit_idx, report_data->FormatHandle(queue).c_str());
        }

        for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
            auto info_loc = loc.dot(Field::pCommandBufferInfos, i);
            info_loc.structure = Struct::VkCommandBufferSubmitInfoKHR;
            const auto cb_state = Get<CMD_BUFFER_STATE>(submit->pCommandBufferInfos[i].commandBuffer);
            skip |= cb_submit_state.Validate(info_loc.dot(Field::commandBuffer), *cb_state, perf_pass);

            skip |= ValidateDeviceMaskToPhysicalDeviceCount(submit->pCommandBufferInfos[i].deviceMask, queue,
                                                            "VUID-VkCommandBufferSubmitInfoKHR-deviceMask-03891");

            // Make sure command buffers are all protected or unprotected
            if (cb_state != nullptr) {
                if ((cb_state->unprotected == true) && (protected_submit == true)) {
                    LogObjectList objlist(cb_state->commandBuffer());
                    objlist.add(queue);
                    skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03886",
                                     "vkQueueSubmit2KHR(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
                                     "VK_SUBMIT_PROTECTED_BIT_KHR set",
                                     report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
                                     report_data->FormatHandle(queue).c_str(), submit_idx);
                }
                if ((cb_state->unprotected == false) && (protected_submit == false)) {
                    LogObjectList objlist(cb_state->commandBuffer());
                    objlist.add(queue);
                    skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03887",
                                     "vkQueueSubmit2KHR(): command buffer %s is protected while queue %s pSubmits[%u] has "
                                     "VK_SUBMIT_PROTECTED_BIT_KHR not set",
                                     report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
                                     report_data->FormatHandle(queue).c_str(),
submit_idx);
                }
            }
        }
    }

    return skip;
}

#ifdef AHB_VALIDATION_SUPPORT
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?

// clang-format off

// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)

// AHardwareBuffer Format                       Vulkan Format
// ======================                       =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM          VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM          VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT    VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM     VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM             VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM             VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT     VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT             VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT     VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT               VK_FORMAT_S8_UINT

// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM,          VK_FORMAT_R8G8B8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM,          VK_FORMAT_R5G6B5_UNORM_PACK16 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT,    VK_FORMAT_R16G16B16A16_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,     VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM,             VK_FORMAT_D16_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM,             VK_FORMAT_X8_D24_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT,     VK_FORMAT_D24_UNORM_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT,             VK_FORMAT_D32_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT,     VK_FORMAT_D32_SFLOAT_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT,               VK_FORMAT_S8_UINT }
};

// AHardwareBuffer Usage                        Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// =====================                        ===================================================
// None                                         VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None                                         VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE      VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE      VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER        VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP           VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE    None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT      VK_IMAGE_CREATE_PROTECTED_BIT
// None                                         VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None                                         VK_IMAGE_CREATE_EXTENDED_USAGE_BIT

// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,    (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER,      (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP,         VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT,    VK_IMAGE_CREATE_PROTECTED_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
    { VK_IMAGE_USAGE_SAMPLED_BIT,                   (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,          (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,          (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
    { VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
};

std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
    { VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
    { VK_IMAGE_CREATE_PROTECTED_BIT,        (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};

// clang-format on

//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
    VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
    bool skip = false;
    // buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
    AHardwareBuffer_Desc ahb_desc;
    AHardwareBuffer_describe(buffer, &ahb_desc);
    uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
                              AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
                              AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
    if (0 == (ahb_desc.usage & required_flags)) {
        skip |= LogError(device, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
                         "vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
                         ") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
                         ahb_desc.usage);
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
                                                                      const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
                                                                      struct AHardwareBuffer **pBuffer) const {
    bool skip = false;
    const auto mem_info = Get<DEVICE_MEMORY_STATE>(pInfo->memory);

    // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
    // VkExportMemoryAllocateInfo::handleTypes when memory was created.
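    // For reference, an AHB export allocation is typically set up along these lines (illustrative sketch only;
    // the variable names are hypothetical):
    //     auto export_info = LvlInitStruct<VkExportMemoryAllocateInfo>();
    //     export_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
    //     auto alloc_info = LvlInitStruct<VkMemoryAllocateInfo>(&export_info);
    //     vkAllocateMemory(device, &alloc_info, nullptr, &memory);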
if (!mem_info->IsExport() || (0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) { skip |= LogError(device, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882", "vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the " "export handleTypes (0x%" PRIx32 ") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.", report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags); } // If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo // with non-NULL image member, then that image must already be bound to memory. if (mem_info->IsDedicatedImage()) { const auto image_state = Get<IMAGE_STATE>(mem_info->dedicated->handle.Cast<VkImage>()); if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(mem_info->mem())))) { LogObjectList objlist(device); objlist.add(pInfo->memory); objlist.add(mem_info->dedicated->handle); skip |= LogError(objlist, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883", "vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated " "%s, but that image is not bound to the VkDeviceMemory object.", report_data->FormatHandle(pInfo->memory).c_str(), report_data->FormatHandle(mem_info->dedicated->handle).c_str()); } } return skip; } // // AHB-specific validation within non-AHB APIs // bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { bool skip = false; auto import_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext); auto exp_mem_alloc_info = LvlFindInChain<VkExportMemoryAllocateInfo>(alloc_info->pNext); auto mem_ded_alloc_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext); if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) { // This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID AHardwareBuffer_Desc ahb_desc = {}; AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc); // Validate AHardwareBuffer_Desc::usage is a valid usage for imported AHB // // BLOB & GPU_DATA_BUFFER combo specifically allowed if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { // Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables // Usage must have at least one bit from the table. 
It may have additional bits not in the table uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE | AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT; if (0 == (ahb_desc.usage & ahb_equiv_usage_bits)) { skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881", "vkAllocateMemory: The AHardwareBuffer_Desc's usage (0x%" PRIx64 ") is not compatible with Vulkan.", ahb_desc.usage); } } // Collect external buffer info auto pdebi = LvlInitStruct<VkPhysicalDeviceExternalBufferInfo>(); pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER]; } auto ext_buf_props = LvlInitStruct<VkExternalBufferProperties>(); DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props); // If buffer is not NULL, Android hardware buffers must be supported for import, as reported by // VkExternalImageFormatProperties or VkExternalBufferProperties. if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) { // Collect external format info auto pdeifi = LvlInitStruct<VkPhysicalDeviceExternalImageFormatInfo>(); pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; auto pdifi2 = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&pdeifi); if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format]; pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER]; } if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP]; } if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT]; } auto ext_img_fmt_props = LvlInitStruct<VkExternalImageFormatProperties>(); auto ifp2 = LvlInitStruct<VkImageFormatProperties2>(&ext_img_fmt_props); VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2); if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) { skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880", "vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties " "structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag."); } } // Retrieve buffer and format properties of the provided AHardwareBuffer auto ahb_format_props = LvlInitStruct<VkAndroidHardwareBufferFormatPropertiesANDROID>(); auto ahb_props = LvlInitStruct<VkAndroidHardwareBufferPropertiesANDROID>(&ahb_format_props); DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props); // allocationSize must be 
the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer if (alloc_info->allocationSize != ahb_props.allocationSize) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-02383", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, allocationSize (%" PRId64 ") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").", alloc_info->allocationSize, ahb_props.allocationSize); } // memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer // Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex; if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, memoryTypeIndex (%" PRId32 ") does not correspond to a bit set in AHardwareBuffer's reported " "memoryTypeBits bitmask (0x%" PRIx32 ").", alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits); } // Checks for allocations without a dedicated allocation requirement if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) { // the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes // AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { skip |= LogError( device, "VUID-VkMemoryAllocateInfo-pNext-02384", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not " "AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.", ahb_desc.format, ahb_desc.usage); } } else { // Checks specific to import with a dedicated allocation requirement auto image_state = Get<IMAGE_STATE>(mem_ded_alloc_info->image); const auto *ici = &image_state->createInfo; // The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER or // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) { skip |= LogError( device, "VUID-VkMemoryAllocateInfo-pNext-02386", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a " "dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64 ") contains neither AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.", ahb_desc.usage); } // the format of image must be VK_FORMAT_UNDEFINED or the format returned by // vkGetAndroidHardwareBufferPropertiesANDROID if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02387", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained " "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's " "format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).", string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format)); } // The width, height, and array layer dimensions of image and the 
// Android hardware buffer must be identical
        if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
            (ici->arrayLayers != ahb_desc.layers)) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02388",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                             "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                             "width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
                             ") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
                             ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
                             ahb_desc.layers);
        }

        // If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
        // have either a full mipmap chain or exactly 1 mip level.
        //
        // NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
        // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
        // its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
        // that the Android hardware buffer contains only a single mip level."
        //
        // TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
        // Clarification requested.
        if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
            (ici->mipLevels != FullMipChainLevels(ici->extent))) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02389",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                             "usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
                             ") is neither 1 nor full mip "
                             "chain levels (%" PRId32 ").",
                             ici->mipLevels, FullMipChainLevels(ici->extent));
        }

        // each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
        // corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
        // AHardwareBuffer_Desc::usage
        if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                           VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                           VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                             "dedicated image usage bits (0x%" PRIx32
                             ") include one or more usage bits not listed in the AHardwareBuffer Usage Equivalence table.",
                             ici->usage);
        }

        std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
                                                 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
        for (VkImageUsageFlags ubit : usages) {
            if (ici->usage & ubit) {
                uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
                if (0 == (ahb_usage & ahb_desc.usage)) {
                    skip |= LogError(
                        device, "VUID-VkMemoryAllocateInfo-pNext-02390",
                        "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                        "The dedicated image usage bit %s equivalent is not in AHardwareBuffer_Desc.usage (0x%" PRIx64 ") ",
                        string_VkImageUsageFlags(ubit).c_str(), ahb_desc.usage);
                }
            }
        }
    }
} else {  // Not an import
    if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
        (0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
        (VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
        // This is an Android HW Buffer export
        if (0 != alloc_info->allocationSize) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-01874",
                             "vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
                             "but allocationSize is non-zero.");
        }
    } else {
        if (0 == alloc_info->allocationSize) {
            skip |= LogError(
                device, "VUID-VkMemoryAllocateInfo-pNext-01874",
                "vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
        }
    }
}
return skip;
}

bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const {
    bool skip = false;

    const auto image_state = Get<IMAGE_STATE>(image);
    if (image_state != nullptr) {
        if (image_state->IsExternalAHB() && (0 == image_state->GetBoundMemory().size())) {
            const char *vuid = strcmp(func_name, "vkGetImageMemoryRequirements()") == 0
                                   ? "VUID-vkGetImageMemoryRequirements-image-04004"
                                   : "VUID-VkImageMemoryRequirementsInfo2-image-01897";
            skip |= LogError(image, vuid,
                             "%s: Attempt to get image memory requirements for an image created with a "
                             "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
                             "bound to memory.",
                             func_name);
        }
    }
    return skip;
}

bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
    const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
    bool skip = false;
    const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
        LvlFindInChain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
    if (nullptr != ahb_usage) {
        const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
            LvlFindInChain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
        if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
            skip |= LogError(device, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
                             "vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
                             "VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
                             "VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
                             "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
        }
    }
    return skip;
}

bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                     VkDeviceMemory memory, VkBuffer buffer) const {
    bool skip = false;
    if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
        const char *vuid = (strcmp(func_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memory-02986"
                                                                            : "VUID-VkBindBufferMemoryInfo-memory-02986";
        LogObjectList objlist(buffer);
        objlist.add(memory);
        skip |= LogError(objlist, vuid,
                         "%s: The VkDeviceMemory (%s) was created with an AHB import operation, which requires "
                         "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID to be set in the VkBuffer (%s) "
                         "VkExternalMemoryBufferCreateInfo::handleType (%s)",
                         func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(buffer).c_str(),
                         string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
    }
    return skip;
}

bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                    VkDeviceMemory memory, VkImage image) const {
    bool skip = false;
    if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
        const char *vuid = (strcmp(func_name, "vkBindImageMemory()") == 0) ? "VUID-vkBindImageMemory-memory-02990"
                                                                           : "VUID-VkBindImageMemoryInfo-memory-02990";
        LogObjectList objlist(image);
        objlist.add(memory);
        skip |= LogError(objlist, vuid,
                         "%s: The VkDeviceMemory (%s) was created with an AHB import operation, which requires "
                         "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID to be set in the VkImage (%s) "
                         "VkExternalMemoryImageCreateInfo::handleType (%s)",
                         func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(image).c_str(),
                         string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
    }
    return skip;
}

#else  // !AHB_VALIDATION_SUPPORT

// Case building for Android without AHB Validation
#ifdef VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
    VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
    return false;
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
                                                                      const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
                                                                      struct AHardwareBuffer **pBuffer) const {
    return false;
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR

bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; }
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
    const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
    return false;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { return false; }
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                     VkDeviceMemory memory, VkBuffer buffer) const {
    return false;
}
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                    VkDeviceMemory memory, VkImage image) const {
    return false;
}

#endif  // AHB_VALIDATION_SUPPORT

bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const {
    bool skip = false;
    if (Count<DEVICE_MEMORY_STATE>() >= phys_dev_props.limits.maxMemoryAllocationCount) {
        skip |= LogError(device, "VUID-vkAllocateMemory-maxMemoryAllocationCount-04101",
                         "vkAllocateMemory: Number of currently valid memory objects is not less than the maximum allowed (%u).",
                         phys_dev_props.limits.maxMemoryAllocationCount);
    }

    if (IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) {
        skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
    } else {
        if (0 == pAllocateInfo->allocationSize) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0.");
        }
    }

    auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
    if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) {
        skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, device,
                                                        "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
        skip |=
            ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
    }

    if (pAllocateInfo->memoryTypeIndex >= phys_dev_mem_props.memoryTypeCount) {
        skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01714",
                         "vkAllocateMemory: attempting to allocate memory type %u, which is not a valid index. Device only "
                         "advertises %u memory types.",
                         pAllocateInfo->memoryTypeIndex, phys_dev_mem_props.memoryTypeCount);
    } else {
        const VkMemoryType memory_type = phys_dev_mem_props.memoryTypes[pAllocateInfo->memoryTypeIndex];
        if (pAllocateInfo->allocationSize > phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size) {
            skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01713",
                             "vkAllocateMemory: attempting to allocate %" PRIu64 " bytes from heap %u, "
                             "but size of that heap is only %" PRIu64 " bytes.",
                             pAllocateInfo->allocationSize, memory_type.heapIndex,
                             phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size);
        }

        if (!enabled_features.device_coherent_memory_features.deviceCoherentMemory &&
            ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) != 0)) {
            skip |= LogError(device, "VUID-vkAllocateMemory-deviceCoherentMemory-02790",
                             "vkAllocateMemory: attempting to allocate memory type %u, which includes the "
                             "VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD memory property, but the deviceCoherentMemory feature "
                             "is not enabled.",
                             pAllocateInfo->memoryTypeIndex);
        }

        if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
            ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872",
                             "vkAllocateMemory(): attempting to allocate memory type %u, which includes the "
                             "VK_MEMORY_PROPERTY_PROTECTED_BIT memory property, but the protectedMemory feature "
                             "is not enabled.",
                             pAllocateInfo->memoryTypeIndex);
        }
    }

    bool imported_ahb = false;
#ifdef AHB_VALIDATION_SUPPORT
    // "memory is not an imported Android Hardware Buffer" refers to VkImportAndroidHardwareBufferInfoANDROID with a non-NULL
    // buffer value.
Memory imported has another VUID to check size and allocationSize match up auto imported_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext); if (imported_ahb_info != nullptr) { imported_ahb = imported_ahb_info->buffer != nullptr; } #endif // AHB_VALIDATION_SUPPORT auto dedicated_allocate_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(pAllocateInfo->pNext); if (dedicated_allocate_info) { if ((dedicated_allocate_info->buffer != VK_NULL_HANDLE) && (dedicated_allocate_info->image != VK_NULL_HANDLE)) { skip |= LogError(device, "VUID-VkMemoryDedicatedAllocateInfo-image-01432", "vkAllocateMemory: Either buffer or image has to be VK_NULL_HANDLE in VkMemoryDedicatedAllocateInfo"); } else if (dedicated_allocate_info->image != VK_NULL_HANDLE) { // Dedicated VkImage const auto image_state = Get<IMAGE_STATE>(dedicated_allocate_info->image); if (image_state->disjoint == true) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-image-01797", "vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_IMAGE_CREATE_DISJOINT_BIT", report_data->FormatHandle(dedicated_allocate_info->image).c_str()); } else { if ((pAllocateInfo->allocationSize != image_state->requirements[0].size) && (imported_ahb == false)) { const char *vuid = IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer) ? "VUID-VkMemoryDedicatedAllocateInfo-image-02964" : "VUID-VkMemoryDedicatedAllocateInfo-image-01433"; skip |= LogError(device, vuid, "vkAllocateMemory: Allocation Size (%" PRIu64 ") needs to be equal to VkImage %s VkMemoryRequirements::size (%" PRIu64 ")", pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->image).c_str(), image_state->requirements[0].size); } if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != 0) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-image-01434", "vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_IMAGE_CREATE_SPARSE_BINDING_BIT", report_data->FormatHandle(dedicated_allocate_info->image).c_str()); } } } else if (dedicated_allocate_info->buffer != VK_NULL_HANDLE) { // Dedicated VkBuffer const auto buffer_state = Get<BUFFER_STATE>(dedicated_allocate_info->buffer); if ((pAllocateInfo->allocationSize != buffer_state->requirements.size) && (imported_ahb == false)) { const char *vuid = IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer) ? 
"VUID-VkMemoryDedicatedAllocateInfo-buffer-02965" : "VUID-VkMemoryDedicatedAllocateInfo-buffer-01435"; skip |= LogError( device, vuid, "vkAllocateMemory: Allocation Size (%" PRIu64 ") needs to be equal to VkBuffer %s VkMemoryRequirements::size (%" PRIu64 ")", pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->buffer).c_str(), buffer_state->requirements.size); } if ((buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-buffer-01436", "vkAllocateMemory: VkBuffer %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_BUFFER_CREATE_SPARSE_BINDING_BIT", report_data->FormatHandle(dedicated_allocate_info->buffer).c_str()); } } } // TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744 return skip; } // For given obj node, if it is use, flag a validation error and return callback result, else return false bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const char *caller_name, const char *error_code) const { if (disabled[object_in_use]) return false; auto obj_struct = obj_node->Handle(); bool skip = false; if (obj_node->InUse()) { skip |= LogError(device, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name, report_data->FormatHandle(obj_struct).c_str()); } return skip; } bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const { const auto mem_info = Get<DEVICE_MEMORY_STATE>(mem); bool skip = false; if (mem_info) { skip |= ValidateObjectNotInUse(mem_info.get(), "vkFreeMemory", "VUID-vkFreeMemory-memory-00677"); } return skip; } // Validate that given Map memory range is valid. This means that the memory should not already be mapped, // and that the size of the map range should be: // 1. Not zero // 2. 
// Validate that the given map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const {
    bool skip = false;
    assert(mem_info);
    const auto mem = mem_info->mem();
    if (size == 0) {
        skip = LogError(mem, "VUID-vkMapMemory-size-00680", "VkMapMemory: Attempting to map memory range of size zero");
    }
    // It is an application error to call VkMapMemory on an object that is already mapped
    if (mem_info->mapped_range.size != 0) {
        skip = LogError(mem, "VUID-vkMapMemory-memory-00678", "VkMapMemory: Attempting to map memory on an already-mapped %s.",
                        report_data->FormatHandle(mem).c_str());
    }
    // Validate offset is not over allocation size
    if (offset >= mem_info->alloc_info.allocationSize) {
        skip = LogError(mem, "VUID-vkMapMemory-offset-00679",
                        "VkMapMemory: Attempting to map memory with an offset of 0x%" PRIx64
                        " which is larger than the total array size 0x%" PRIx64,
                        offset, mem_info->alloc_info.allocationSize);
    }
    // Validate that offset + size is within object's allocationSize
    if (size != VK_WHOLE_SIZE) {
        if ((offset + size) > mem_info->alloc_info.allocationSize) {
            skip = LogError(mem, "VUID-vkMapMemory-size-00681",
                            "VkMapMemory: Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
                            offset, size + offset, mem_info->alloc_info.allocationSize);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                               VkQueue *pQueue) const {
    bool skip = false;
    skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex",
                                      "VUID-vkGetDeviceQueue-queueFamilyIndex-00384");
    for (size_t i = 0; i < device_queue_info_list.size(); i++) {
        const auto device_queue_info = device_queue_info_list.at(i);
        if (device_queue_info.queue_family_index != queueFamilyIndex) {
            continue;
        }
        // flags must be zero
        if (device_queue_info.flags != 0) {
            skip |= LogError(
                device, "VUID-vkGetDeviceQueue-flags-01841",
                "vkGetDeviceQueue: queueIndex (=%" PRIu32
                ") was created with a non-zero VkDeviceQueueCreateFlags in vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32
                "]. Need to use vkGetDeviceQueue2 instead.",
                queueIndex, device_queue_info.index);
        }
        if (device_queue_info.queue_count <= queueIndex) {
            skip |= LogError(device, "VUID-vkGetDeviceQueue-queueIndex-00385",
                             "vkGetDeviceQueue: queueIndex (=%" PRIu32
                             ") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
                             ") when the device was created vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32
                             "] (i.e. is not less than %" PRIu32 ").",
                             queueIndex, queueFamilyIndex, device_queue_info.index, device_queue_info.queue_count);
        }
    }
    return skip;
}
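// A minimal sketch (hypothetical application code) of the misuse the flags check above catches: queues from a
// family created with VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT cannot be fetched with vkGetDeviceQueue:
//   VkQueue queue = VK_NULL_HANDLE;
//   vkGetDeviceQueue(device, protected_family_index, 0, &queue);  // triggers VUID-vkGetDeviceQueue-flags-01841
// vkGetDeviceQueue2 with a matching VkDeviceQueueInfo2::flags must be used instead (validated next).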
bool CoreChecks::PreCallValidateGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) const {
    bool skip = false;
    if (pQueueInfo) {
        const uint32_t queueFamilyIndex = pQueueInfo->queueFamilyIndex;
        const uint32_t queueIndex = pQueueInfo->queueIndex;
        const VkDeviceQueueCreateFlags flags = pQueueInfo->flags;
        skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue2", "pQueueInfo->queueFamilyIndex",
                                          "VUID-VkDeviceQueueInfo2-queueFamilyIndex-01842");
        // ValidateDeviceQueueFamily() already checks that queueFamilyIndex is valid, but we still need to make sure the
        // flags match what the queue family was created with
        bool valid_flags = false;
        for (size_t i = 0; i < device_queue_info_list.size(); i++) {
            const auto device_queue_info = device_queue_info_list.at(i);
            // vkGetDeviceQueue2 only matches when both the family index AND the flags are the same as at device creation;
            // this handles the case where the same queueFamilyIndex is used with and without the protected flag
            if ((device_queue_info.queue_family_index != queueFamilyIndex) || (device_queue_info.flags != flags)) {
                continue;
            }
            valid_flags = true;
            if (device_queue_info.queue_count <= queueIndex) {
                skip |= LogError(
                    device, "VUID-VkDeviceQueueInfo2-queueIndex-01843",
                    "vkGetDeviceQueue2: queueIndex (=%" PRIu32
                    ") is not less than the number of queues requested from [queueFamilyIndex (=%" PRIu32
                    "), flags (%s)] combination when the device was created vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "] (i.e. is not less than %" PRIu32 ").",
                    queueIndex, queueFamilyIndex, string_VkDeviceQueueCreateFlags(flags).c_str(), device_queue_info.index,
                    device_queue_info.queue_count);
            }
        }
        // Don't emit a second error message if we are already skipping from ValidateDeviceQueueFamily
        if (!valid_flags && !skip) {
            skip |= LogError(device, "VUID-VkDeviceQueueInfo2-flags-06225",
                             "vkGetDeviceQueue2: The combination of queueFamilyIndex (=%" PRIu32
                             ") and flags (%s) were never both set together in any element of "
                             "vkCreateDevice::pCreateInfo->pQueueCreateInfos at device creation time.",
                             queueFamilyIndex, string_VkDeviceQueueCreateFlags(flags).c_str());
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const {
    bool skip = false;
    auto *sem_type_create_info = LvlFindInChain<VkSemaphoreTypeCreateInfo>(pCreateInfo->pNext);
    if (sem_type_create_info) {
        if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE && !enabled_features.core12.timelineSemaphore) {
            skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-timelineSemaphore-03252",
                             "VkCreateSemaphore: timelineSemaphore feature is not enabled, cannot create timeline semaphores");
        }
        if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY && sem_type_create_info->initialValue != 0) {
            skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-semaphoreType-03279",
                             "vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY, initialValue must be zero");
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
    return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphores");
}

bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
    return ValidateWaitSemaphores(device, pWaitInfo, timeout,
"VkWaitSemaphoresKHR"); } bool CoreChecks::ValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout, const char *apiName) const { bool skip = false; for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) { auto semaphore_state = Get<SEMAPHORE_STATE>(pWaitInfo->pSemaphores[i]); if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) { skip |= LogError(pWaitInfo->pSemaphores[i], "VUID-VkSemaphoreWaitInfo-pSemaphores-03256", "%s(): all semaphores in pWaitInfo must be timeline semaphores, but %s is not", apiName, report_data->FormatHandle(pWaitInfo->pSemaphores[i]).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const { const auto fence_node = Get<FENCE_STATE>(fence); bool skip = false; if (fence_node) { if (fence_node->Scope() == kSyncScopeInternal && fence_node->State() == FENCE_INFLIGHT) { skip |= LogError(fence, "VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) const { const auto sema_node = Get<SEMAPHORE_STATE>(semaphore); bool skip = false; if (sema_node) { skip |= ValidateObjectNotInUse(sema_node.get(), "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137"); } return skip; } bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const { const auto event_state = Get<EVENT_STATE>(event); bool skip = false; if (event_state) { skip |= ValidateObjectNotInUse(event_state.get(), "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145"); } return skip; } bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) const { if (disabled[query_validation]) return false; const auto qp_state = Get<QUERY_POOL_STATE>(queryPool); bool skip = false; if (qp_state) { bool completed_by_get_results = true; for (uint32_t i = 0; i < qp_state->createInfo.queryCount; ++i) { auto state = qp_state->GetQueryState(i, 0); if (state != QUERYSTATE_AVAILABLE) { completed_by_get_results = false; break; } } if (!completed_by_get_results) { skip |= ValidateObjectNotInUse(qp_state.get(), "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793"); } } return skip; } bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state, uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const { bool skip = false; if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) { string invalid_flags_string; for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) { if (flag & flags) { if (invalid_flags_string.size()) { invalid_flags_string += " and "; } invalid_flags_string += string_VkQueryResultFlagBits(flag); } } skip |= LogError(query_pool_state->pool(), strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? 
"VUID-vkGetQueryPoolResults-queryType-03230" : "VUID-vkCmdCopyQueryPoolResults-queryType-03233", "%s: QueryPool %s was created with a queryType of" "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.", cmd_name, report_data->FormatHandle(query_pool_state->pool()).c_str(), invalid_flags_string.c_str()); } for (uint32_t query_index = firstQuery; query_index < queryCount; query_index++) { uint32_t submitted = 0; for (uint32_t pass_index = 0; pass_index < query_pool_state->n_performance_passes; pass_index++) { auto state = query_pool_state->GetQueryState(query_index, pass_index); if (state == QUERYSTATE_AVAILABLE) { submitted++; } } if (submitted < query_pool_state->n_performance_passes) { skip |= LogError(query_pool_state->pool(), "VUID-vkGetQueryPoolResults-queryType-03231", "%s: QueryPool %s has %u performance query passes, but the query has only been " "submitted for %u of the passes.", cmd_name, report_data->FormatHandle(query_pool_state->pool()).c_str(), query_pool_state->n_performance_passes, submitted); } } return skip; } bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, void *pData, VkDeviceSize stride, VkQueryResultFlags flags, const char *apiName) const { bool skip = false; const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip; if (((((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0 || (stride % sizeof(VkPerformanceCounterResultKHR)) != 0)) { skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-03229", "%s(): QueryPool %s was created with a queryType of " "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData & stride are not multiple of the " "size of VkPerformanceCounterResultKHR.", apiName, report_data->FormatHandle(queryPool).c_str()); } skip |= ValidatePerformanceQueryResults(apiName, query_pool_state.get(), firstQuery, queryCount, flags); return skip; } bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) const { if (disabled[query_validation]) return false; bool skip = false; skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride, "dataSize", dataSize, flags); skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkGetQueryPoolResults()", "VUID-vkGetQueryPoolResults-firstQuery-00813", "VUID-vkGetQueryPoolResults-firstQuery-00816"); skip |= ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags, "vkGetQueryPoolResults"); const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); if (query_pool_state) { if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) { skip |= LogError( queryPool, "VUID-vkGetQueryPoolResults-queryType-00818", "%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.", report_data->FormatHandle(queryPool).c_str()); } if (!skip) { uint32_t query_avail_data = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0; uint32_t query_size_in_bytes = (flags & VK_QUERY_RESULT_64_BIT) ? 
sizeof(uint64_t) : sizeof(uint32_t);
            uint32_t query_items = 0;
            uint32_t query_size = 0;
            switch (query_pool_state->createInfo.queryType) {
                case VK_QUERY_TYPE_OCCLUSION:
                    // Occlusion queries write one integer value - the number of samples passed.
                    query_items = 1;
                    query_size = query_size_in_bytes * (query_items + query_avail_data);
                    break;
                case VK_QUERY_TYPE_PIPELINE_STATISTICS:
                    // Pipeline statistics queries write one integer value for each bit that is enabled in the
                    // pipelineStatistics when the pool is created
                    {
                        const int num_bits = sizeof(VkFlags) * CHAR_BIT;
                        std::bitset<num_bits> pipe_stats_bits(query_pool_state->createInfo.pipelineStatistics);
                        query_items = static_cast<uint32_t>(pipe_stats_bits.count());
                        query_size = query_size_in_bytes * (query_items + query_avail_data);
                    }
                    break;
                case VK_QUERY_TYPE_TIMESTAMP:
                    // Timestamp queries write one integer
                    query_items = 1;
                    query_size = query_size_in_bytes * (query_items + query_avail_data);
                    break;
                case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
                    // Transform feedback queries write two integers
                    query_items = 2;
                    query_size = query_size_in_bytes * (query_items + query_avail_data);
                    break;
                case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
                    // Performance queries store results in a tightly packed array of VkPerformanceCounterResultKHR
                    query_items = query_pool_state->perf_counter_index_count;
                    query_size = sizeof(VkPerformanceCounterResultKHR) * query_items;
                    if (query_size > stride) {
                        skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04519",
                                         "vkGetQueryPoolResults() on querypool %s specified stride %" PRIu64
                                         " which must be at least counterIndexCount (%d) "
                                         "multiplied by sizeof(VkPerformanceCounterResultKHR) (%zu).",
                                         report_data->FormatHandle(queryPool).c_str(), stride, query_items,
                                         sizeof(VkPerformanceCounterResultKHR));
                    }
                    break;
                // These cases intentionally fall through to the default
                case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:  // VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV
                case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
                case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
                default:
                    query_size = 0;
                    break;
            }
            if (query_size && (((queryCount - 1) * stride + query_size) > dataSize)) {
                skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-dataSize-00817",
                                 "vkGetQueryPoolResults() on querypool %s specified dataSize %zu which is "
                                 "incompatible with the specified query type and options.",
                                 report_data->FormatHandle(queryPool).c_str(), dataSize);
            }
        }
        if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR &&
            (flags & VK_QUERY_RESULT_WITH_STATUS_BIT_KHR) == 0) {
            skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04810",
                             "vkGetQueryPoolResults(): querypool %s was created with VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR "
                             "queryType, but flags do not contain VK_QUERY_RESULT_WITH_STATUS_BIT_KHR bit.",
                             report_data->FormatHandle(queryPool).c_str());
        }
    }
    return skip;
}

bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info,
                                           VkDeviceSize memoryOffset, const char *api_name) const {
    bool skip = false;
    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
        const char *error_code = nullptr;
        if (typed_handle.type == kVulkanObjectTypeBuffer) {
            if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
                error_code = "VUID-vkBindBufferMemory-memoryOffset-01031";
            } else {
                error_code = "VUID-VkBindBufferMemoryInfo-memoryOffset-01031";
            }
        } else if (typed_handle.type == kVulkanObjectTypeImage) {
            if (strcmp(api_name,
"vkBindImageMemory()") == 0) { error_code = "VUID-vkBindImageMemory-memoryOffset-01046"; } else { error_code = "VUID-VkBindImageMemoryInfo-memoryOffset-01046"; } } else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) { error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03621"; } else { // Unsupported object type assert(false); } LogObjectList objlist(mem_info->mem()); objlist.add(typed_handle); skip = LogError(objlist, error_code, "In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ".", api_name, report_data->FormatHandle(mem_info->mem()).c_str(), report_data->FormatHandle(typed_handle).c_str(), memoryOffset, mem_info->alloc_info.allocationSize); } return skip; } bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName, const char *msgCode) const { bool skip = false; if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) { skip = LogError(mem_info->mem(), msgCode, "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory " "type (0x%X) of %s.", funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, report_data->FormatHandle(mem_info->mem()).c_str()); } return skip; } bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset, const char *api_name) const { const auto buffer_state = Get<BUFFER_STATE>(buffer); bool bind_buffer_mem_2 = strcmp(api_name, "vkBindBufferMemory()") != 0; bool skip = false; if (buffer_state) { // Track objects tied to memory skip = ValidateSetMemBinding(mem, *buffer_state, api_name); const auto mem_info = Get<DEVICE_MEMORY_STATE>(mem); // Validate memory requirements alignment if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memoryOffset-01036" : "VUID-vkBindBufferMemory-memoryOffset-01036"; skip |= LogError(buffer, vuid, "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, memoryOffset, buffer_state->requirements.alignment); } if (mem_info) { // Validate bound memory range information skip |= ValidateInsertBufferMemoryRange(buffer, mem_info.get(), memoryOffset, api_name); const char *mem_type_vuid = bind_buffer_mem_2 ? 
"VUID-VkBindBufferMemoryInfo-memory-01035" : "VUID-vkBindBufferMemory-memory-01035"; skip |= ValidateMemoryTypes(mem_info.get(), buffer_state->requirements.memoryTypeBits, api_name, mem_type_vuid); // Validate memory requirements size if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-size-01037" : "VUID-vkBindBufferMemory-size-01037"; skip |= LogError(buffer, vuid, "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size); } // Validate dedicated allocation if (mem_info->IsDedicatedBuffer() && ((mem_info->dedicated->handle.Cast<VkBuffer>() != buffer) || (memoryOffset != 0))) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01508" : "VUID-vkBindBufferMemory-memory-01508"; LogObjectList objlist(buffer); objlist.add(mem); objlist.add(mem_info->dedicated->handle); skip |= LogError(objlist, vuid, "%s: for dedicated %s, VkMemoryDedicatedAllocateInfo::buffer %s must be equal " "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(mem_info->dedicated->handle).c_str(), report_data->FormatHandle(buffer).c_str(), memoryOffset); } auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext); if (enabled_features.core12.bufferDeviceAddress && (buffer_state->createInfo.usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT) && (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT))) { skip |= LogError(buffer, "VUID-vkBindBufferMemory-bufferDeviceAddress-03339", "%s: If buffer was created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT bit set, " "memory must have been allocated with the VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT bit set.", api_name); } // Validate export memory handles if ((mem_info->export_handle_type_flags != 0) && ((mem_info->export_handle_type_flags & buffer_state->external_memory_handle) == 0)) { const char *vuid = bind_buffer_mem_2 ? 
"VUID-VkBindBufferMemoryInfo-memory-02726" : "VUID-vkBindBufferMemory-memory-02726"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least one " "handle from VkBuffer (%s) handleType %s.", api_name, report_data->FormatHandle(mem).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(), report_data->FormatHandle(buffer).c_str(), string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str()); } // Validate import memory handles if (mem_info->IsImportAHB() == true) { skip |= ValidateBufferImportedHandleANDROID(api_name, buffer_state->external_memory_handle, mem, buffer); } else if (mem_info->IsImport() == true) { if ((mem_info->import_handle_type_flags & buffer_state->external_memory_handle) == 0) { const char *vuid = nullptr; if ((bind_buffer_mem_2) && IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindBufferMemoryInfo-memory-02985"; } else if ((!bind_buffer_mem_2) && IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindBufferMemory-memory-02985"; } else if ((bind_buffer_mem_2) && !IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindBufferMemoryInfo-memory-02727"; } else if ((!bind_buffer_mem_2) && !IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindBufferMemory-memory-02727"; } LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s which " "is not set in the VkBuffer (%s) VkExternalMemoryBufferCreateInfo::handleType (%s)", api_name, report_data->FormatHandle(mem).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(), report_data->FormatHandle(buffer).c_str(), string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str()); } } // Validate mix of protected buffer and memory if ((buffer_state->unprotected == false) && (mem_info->unprotected == true)) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01898" : "VUID-vkBindBufferMemory-None-01898"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkBuffer (%s) was set " "to use protected memory.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str()); } else if ((buffer_state->unprotected == true) && (mem_info->unprotected == false)) { const char *vuid = bind_buffer_mem_2 ? 
"VUID-VkBindBufferMemoryInfo-None-01899" : "VUID-vkBindBufferMemory-None-01899"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with protected memory but the VkBuffer (%s) was not set " "to use protected memory.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) const { const char *api_name = "vkBindBufferMemory()"; return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name); } bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) const { bool skip = false; if (IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { skip |= ValidateGetImageMemoryRequirementsANDROID(image, "vkGetImageMemoryRequirements()"); } const auto image_state = Get<IMAGE_STATE>(image); if (image_state) { // Checks for no disjoint bit if (image_state->disjoint == true) { skip |= LogError(image, "VUID-vkGetImageMemoryRequirements-image-01588", "vkGetImageMemoryRequirements(): %s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT " "(need to use vkGetImageMemoryRequirements2).", report_data->FormatHandle(image).c_str()); } } return skip; } bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo, const char *func_name) const { bool skip = false; if (IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { skip |= ValidateGetImageMemoryRequirementsANDROID(pInfo->image, func_name); } const auto image_state = Get<IMAGE_STATE>(pInfo->image); const VkFormat image_format = image_state->createInfo.format; const VkImageTiling image_tiling = image_state->createInfo.tiling; const VkImagePlaneMemoryRequirementsInfo *image_plane_info = LvlFindInChain<VkImagePlaneMemoryRequirementsInfo>(pInfo->pNext); if ((FormatIsMultiplane(image_format)) && (image_state->disjoint == true) && (image_plane_info == nullptr)) { skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01589", "%s: %s image was created with a multi-planar format (%s) and " "VK_IMAGE_CREATE_DISJOINT_BIT, but the current pNext doesn't include a " "VkImagePlaneMemoryRequirementsInfo struct", func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format)); } if ((image_state->disjoint == false) && (image_plane_info != nullptr)) { skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01590", "%s: %s image was not 
created with VK_IMAGE_CREATE_DISJOINT_BIT, "
                         "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
                         func_name, report_data->FormatHandle(pInfo->image).c_str());
    }
    if ((FormatIsMultiplane(image_format) == false) && (image_tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) &&
        (image_plane_info != nullptr)) {
        const char *vuid = IsExtEnabled(device_extensions.vk_ext_image_drm_format_modifier)
                               ? "VUID-VkImageMemoryRequirementsInfo2-image-02280"
                               : "VUID-VkImageMemoryRequirementsInfo2-image-01591";
        skip |= LogError(pInfo->image, vuid,
                         "%s: %s image is a single-plane format (%s) and does not have tiling of "
                         "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT, "
                         "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
                         func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
    }
    if (image_plane_info != nullptr) {
        if ((image_tiling == VK_IMAGE_TILING_LINEAR) || (image_tiling == VK_IMAGE_TILING_OPTIMAL)) {
            // Make sure planeAspect is only a single, valid plane
            uint32_t planes = FormatPlaneCount(image_format);
            VkImageAspectFlags aspect = image_plane_info->planeAspect;
            if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
                skip |= LogError(pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
                                 "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be "
                                 "VK_IMAGE_ASPECT_PLANE_0_BIT or VK_IMAGE_ASPECT_PLANE_1_BIT.",
                                 func_name, report_data->FormatHandle(image_state->image()).c_str(),
                                 string_VkImageAspectFlags(aspect).c_str());
            }
            if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
                (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
                skip |= LogError(pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
                                 "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be "
                                 "VK_IMAGE_ASPECT_PLANE_0_BIT or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
                                 func_name, report_data->FormatHandle(image_state->image()).c_str(),
                                 string_VkImageAspectFlags(aspect).c_str());
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                            VkMemoryRequirements2 *pMemoryRequirements) const {
    return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2()");
}

bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                               VkMemoryRequirements2 *pMemoryRequirements) const {
    return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2KHR()");
}

bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
                                                                        const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                        VkImageFormatProperties2 *pImageFormatProperties) const {
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
    return skip;
}

bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                           const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                           VkImageFormatProperties2 *pImageFormatProperties) const {
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
    return skip;
}
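// The next several entry points share one pattern: an object may only be destroyed once no queue or command
// buffer still references it, so each PreCallValidateDestroy* defers to ValidateObjectNotInUse with the
// matching "object in use" VUID.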
bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) const {
    const auto pipeline_state = Get<PIPELINE_STATE>(pipeline);
    bool skip = false;
    if (pipeline_state) {
        skip |= ValidateObjectNotInUse(pipeline_state.get(), "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
    }
    return skip;
}

bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const {
    const auto sampler_state = Get<SAMPLER_STATE>(sampler);
    bool skip = false;
    if (sampler_state) {
        skip |= ValidateObjectNotInUse(sampler_state.get(), "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
    }
    return skip;
}

bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                      const VkAllocationCallbacks *pAllocator) const {
    const auto desc_pool_state = Get<DESCRIPTOR_POOL_STATE>(descriptorPool);
    bool skip = false;
    if (desc_pool_state) {
        skip |= ValidateObjectNotInUse(desc_pool_state.get(), "vkDestroyDescriptorPool",
                                       "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
    }
    return skip;
}

// Verify the command buffer in the given cb_node is not in the global in-flight set, and return the skip result.
// This check is only valid at a point when the command buffer is being reset or freed.
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const {
    bool skip = false;
    if (cb_node->InUse()) {
        skip |= LogError(cb_node->commandBuffer(), error_code, "Attempt to %s %s which is in use.", action,
                         report_data->FormatHandle(cb_node->commandBuffer()).c_str());
    }
    return skip;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const {
    bool skip = false;
    for (auto &entry : pPool->commandBuffers) {
        const auto cb_state = entry.second;
        skip |= CheckCommandBufferInFlight(cb_state, action, error_code);
    }
    return skip;
}

bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                   const VkCommandBuffer *pCommandBuffers) const {
    bool skip = false;
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        const auto cb_node = Get<CMD_BUFFER_STATE>(pCommandBuffers[i]);
        // A command buffer may only be freed once it is no longer in flight
        if (cb_node) {
            skip |= CheckCommandBufferInFlight(cb_node.get(), "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const {
    bool skip = false;
    skip |= ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
                                      "VUID-vkCreateCommandPool-queueFamilyIndex-01937");
    if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
        ((pCreateInfo->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) != 0)) {
        skip |= LogError(device, "VUID-VkCommandPoolCreateInfo-flags-02860",
                         "vkCreateCommandPool(): the protectedMemory device feature is disabled: CommandPools cannot be created "
                         "with the VK_COMMAND_POOL_CREATE_PROTECTED_BIT set.");
    }
    return skip;
}
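// A minimal sketch (hypothetical application code) of a query pool creation the checks below reject:
//   VkQueryPoolCreateInfo qpci = {VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO};
//   qpci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
//   qpci.queryCount = 1;
//   vkCreateQueryPool(device, &qpci, nullptr, &pool);  // no VkQueryPoolPerformanceCreateInfoKHR in the pNext
//                                                      // chain -> VUID-VkQueryPoolCreateInfo-queryType-03222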
bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const {
    if (disabled[query_validation]) return false;
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!enabled_features.core.pipelineStatisticsQuery) {
            skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00791",
                             "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
                             "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
        }
    }
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        if (!enabled_features.performance_query_features.performanceCounterQueryPools) {
            skip |= LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237",
                             "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with "
                             "VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE.");
        }
        auto perf_ci = LvlFindInChain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext);
        if (!perf_ci) {
            skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-03222",
                             "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of "
                             "pCreateInfo does not contain an instance of VkQueryPoolPerformanceCreateInfoKHR.");
        } else {
            const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex);
            if (perf_counter_iter == physical_device_state->perf_counters.end()) {
                skip |= LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236",
                                 "vkCreateQueryPool(): VkQueryPoolPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index.");
            } else {
                const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get();
                for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) {
                    if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) {
                        skip |= LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321",
                                         "vkCreateQueryPool(): VkQueryPoolPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid "
                                         "counter index.",
                                         idx, perf_ci->pCounterIndices[idx]);
                    }
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
                                                   const VkAllocationCallbacks *pAllocator) const {
    const auto cp_state = Get<COMMAND_POOL_STATE>(commandPool);
    bool skip = false;
    if (cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= CheckCommandBuffersInFlight(cp_state.get(), "destroy command pool with",
                                            "VUID-vkDestroyCommandPool-commandPool-00041");
    }
    return skip;
}

bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const {
    const auto command_pool_state = Get<COMMAND_POOL_STATE>(commandPool);
    return CheckCommandBuffersInFlight(command_pool_state.get(), "reset command pool with",
                                       "VUID-vkResetCommandPool-commandPool-00040");
}

bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const {
    bool skip = false;
    for (uint32_t i = 0; i < fenceCount; ++i) {
        const auto fence_state = Get<FENCE_STATE>(pFences[i]);
        if (fence_state && fence_state->Scope() == kSyncScopeInternal && fence_state->State() == FENCE_INFLIGHT) {
            skip |= LogError(pFences[i], "VUID-vkResetFences-pFences-01123", "%s
is in use.", report_data->FormatHandle(pFences[i]).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) const { const auto framebuffer_state = Get<FRAMEBUFFER_STATE>(framebuffer); bool skip = false; if (framebuffer_state) { skip |= ValidateObjectNotInUse(framebuffer_state.get(), "vkDestroyFramebuffer", "VUID-vkDestroyFramebuffer-framebuffer-00892"); } return skip; } bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) const { const auto rp_state = Get<RENDER_PASS_STATE>(renderPass); bool skip = false; if (rp_state) { skip |= ValidateObjectNotInUse(rp_state.get(), "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873"); } return skip; } // Access helper functions for external modules VkFormatProperties3KHR CoreChecks::GetPDFormatProperties(const VkFormat format) const { auto fmt_props_3 = LvlInitStruct<VkFormatProperties3KHR>(); auto fmt_props_2 = LvlInitStruct<VkFormatProperties2>(&fmt_props_3); if (has_format_feature2) { DispatchGetPhysicalDeviceFormatProperties2(physical_device, format, &fmt_props_2); } else { VkFormatProperties format_properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties); fmt_props_3.linearTilingFeatures = format_properties.linearTilingFeatures; fmt_props_3.optimalTilingFeatures = format_properties.optimalTilingFeatures; fmt_props_3.bufferFeatures = format_properties.bufferFeatures; } return fmt_props_3; } bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec, const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const { bool skip = false; const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits; for (uint32_t i = 0; i < count; i++) { auto pvids_ci = (pipe_cis[i].pVertexInputState) ? 
LvlFindInChain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext) : nullptr; if (nullptr == pvids_ci) continue; const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get(); for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) { const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]); if (vibdd->binding >= device_limits->maxVertexInputBindings) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).", i, j, vibdd->binding, device_limits->maxVertexInputBindings); } if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).", i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor); } if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not " "enabled.", i, j); } if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not " "enabled.", i, j, vibdd->divisor); } // Find the corresponding binding description and validate input rate setting bool failed_01871 = true; for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) { if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) && (VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) { failed_01871 = false; break; } } if (failed_01871) { // Description not found, or has incorrect inputRate value skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's " "VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.", i, j, vibdd->binding); } } } return skip; } bool CoreChecks::ValidatePipelineCacheControlFlags(VkPipelineCreateFlags flags, uint32_t index, const char *caller_name, const char *vuid) const { bool skip = false; if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) { const VkPipelineCreateFlags invalid_flags = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT; if 
((flags & invalid_flags) != 0) { skip |= LogError(device, vuid, "%s(): pipelineCreationCacheControl is turned off but pipeline[%u] has VkPipelineCreateFlags " "containing VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or " "VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT", caller_name, index); } } return skip; } bool CoreChecks::PreCallValidateCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) const { bool skip = false; if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) { if ((pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) != 0) { skip |= LogError(device, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892", "vkCreatePipelineCache(): pipelineCreationCacheControl is turned off but pCreateInfo::flags contains " "VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT"); } } return skip; } bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *cgpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, cgpl_state_data); create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); for (uint32_t i = 0; i < count; i++) { if (pCreateInfos[i].renderPass == VK_NULL_HANDLE) { if (!enabled_features.dynamic_rendering_features.dynamicRendering) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-dynamicRendering-06052", "vkCreateGraphicsPipeline: pCreateInfos[%" PRIu32 "].renderPass is VK_NULL_HANDLE but dynamicRendering is not enabled.", i); return true; } } } for (uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i); } for (uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i); } if (IsExtEnabled(device_extensions.vk_ext_vertex_attribute_divisor)) { skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos); } if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) { for (uint32_t i = 0; i < count; ++i) { // Validate depth-stencil state auto raster_state_ci = pCreateInfos[i].pRasterizationState; if ((VK_FALSE == enabled_features.portability_subset_features.separateStencilMaskRef) && raster_state_ci && (VK_CULL_MODE_NONE == raster_state_ci->cullMode)) { auto depth_stencil_ci = pCreateInfos[i].pDepthStencilState; if (depth_stencil_ci && (VK_TRUE == depth_stencil_ci->stencilTestEnable) && (depth_stencil_ci->front.reference != depth_stencil_ci->back.reference)) { skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-separateStencilMaskRef-04453", "Invalid Pipeline CreateInfo[%d] (portability error): VkStencilOpState::reference must be the " "same for front and back", i); } } // Validate color attachments uint32_t subpass = pCreateInfos[i].subpass; const auto render_pass = Get<RENDER_PASS_STATE>(pCreateInfos[i].renderPass); bool ignore_color_blend_state = pCreateInfos[i].pRasterizationState->rasterizerDiscardEnable || render_pass->createInfo.pSubpasses[subpass].colorAttachmentCount == 0; if ((VK_FALSE == enabled_features.portability_subset_features.constantAlphaColorBlendFactors) && 
!ignore_color_blend_state) {
                auto color_blend_state = pCreateInfos[i].pColorBlendState;
                const auto attachments = color_blend_state->pAttachments;
                for (uint32_t color_attachment_index = 0; color_attachment_index < color_blend_state->attachmentCount;
                     ++color_attachment_index) {
                    if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor) ||
                        (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor)) {
                        skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04454",
                                         "Invalid Pipeline CreateInfo[%d] (portability error): srcColorBlendFactor for color attachment %d must "
                                         "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
                                         i, color_attachment_index);
                    }
                    if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor) ||
                        (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor)) {
                        skip |= LogError(device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04455",
                                         "Invalid Pipeline CreateInfo[%d] (portability error): dstColorBlendFactor for color attachment %d must "
                                         "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
                                         i, color_attachment_index);
                    }
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkComputePipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                       void *ccpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                    pPipelines, ccpl_state_data);
    auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
    for (uint32_t i = 0; i < count; i++) {
        // TODO: Add Compute Pipeline Verification
        skip |= ValidateComputePipelineShaderState(ccpl_state->pipe_state[i].get());
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateComputePipelines",
                                                  "VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875");
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                            const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
                                                            const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                            void *crtpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines, crtpl_state_data);
    auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
    for (uint32_t i = 0; i < count; i++) {
        PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
        const auto &create_info = pipeline->create_info.raytracing;
        if (create_info.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
            std::shared_ptr<const PIPELINE_STATE> base_pipeline;
            if (create_info.basePipelineIndex != -1) {
                base_pipeline = crtpl_state->pipe_state[create_info.basePipelineIndex];
            } else if (create_info.basePipelineHandle != VK_NULL_HANDLE) {
                base_pipeline = Get<PIPELINE_STATE>(create_info.basePipelineHandle);
            }
            if (!base_pipeline || !(base_pipeline->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
                skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesNV-flags-03416",
                                 "vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the "
                                 "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag, "
                                 "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
            }
        }
        skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ false);
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesNV",
                                                  "VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905");
    }
    return skip;
}
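// The KHR variant below repeats the derivative-pipeline check and additionally validates pipeline libraries:
// every pipeline listed in VkPipelineLibraryCreateInfoKHR must itself have been created with
// VK_PIPELINE_CREATE_LIBRARY_BIT_KHR, and must carry any skip-AABBs / skip-triangles / no-null-shader create
// flags that the pipeline being created uses.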
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
                                                             VkPipelineCache pipelineCache, uint32_t count,
                                                             const VkRayTracingPipelineCreateInfoKHR *pCreateInfos,
                                                             const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                             void *crtpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, count,
                                                                          pCreateInfos, pAllocator, pPipelines, crtpl_state_data);
    auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data);
    for (uint32_t i = 0; i < count; i++) {
        PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
        const auto &create_info = pipeline->create_info.raytracing;
        if (create_info.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
            std::shared_ptr<const PIPELINE_STATE> base_pipeline;
            if (create_info.basePipelineIndex != -1) {
                base_pipeline = crtpl_state->pipe_state[create_info.basePipelineIndex];
            } else if (create_info.basePipelineHandle != VK_NULL_HANDLE) {
                base_pipeline = Get<PIPELINE_STATE>(create_info.basePipelineHandle);
            }
            if (!base_pipeline || !(base_pipeline->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
                skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03416",
                                 "vkCreateRayTracingPipelinesKHR: If the flags member of any element of pCreateInfos contains the "
                                 "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag, "
                                 "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
            }
        }
        skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ true);
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesKHR",
                                                  "VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905");
        if (create_info.pLibraryInfo) {
            const std::vector<std::pair<const char *, VkPipelineCreateFlags>> vuid_map = {
                {"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04718", VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR},
                {"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04719", VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR},
                {"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04720", VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR},
                {"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04721", VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR},
                {"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04722", VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR},
                {"VUID-VkRayTracingPipelineCreateInfoKHR-flags-04723", VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR},
            };
            for (uint32_t j = 0; j < create_info.pLibraryInfo->libraryCount; ++j) {
                const auto lib = Get<PIPELINE_STATE>(create_info.pLibraryInfo->pLibraries[j]);
                if ((lib->create_info.raytracing.flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) == 0) {
                    skip |= LogError(device, "VUID-VkPipelineLibraryCreateInfoKHR-pLibraries-03381",
                                     "vkCreateRayTracingPipelinesKHR(): pCreateInfo[%" PRIu32 "].pLibraryInfo->pLibraries[%" PRIu32
                                     "] was not created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR.",
                                     i, j);
                }
                for (const auto &pair : vuid_map) {
                    if (create_info.flags & pair.second) {
                        if ((lib->create_info.raytracing.flags & pair.second) == 0) {
                            skip |= LogError(device, pair.first,
                                             "vkCreateRayTracingPipelinesKHR(): pCreateInfo[%" PRIu32
                                             "].flags contains %s bit, but pCreateInfo[%" PRIu32
                                             "].pLibraryInfo->pLibraries[%" PRIu32 "] was created without it.",
                                             i, string_VkPipelineCreateFlags(pair.second).c_str(), i, j);
                        }
                    }
                }
            }
        }
    }
    return skip;
}
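// The vkGetPipelineExecutable*KHR queries below are all gated on the pipelineExecutableInfo feature; the shared
// helper additionally range-checks VkPipelineExecutableInfoKHR::executableIndex against the executable count
// the driver reports for the pipeline.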
(create_info.flags & pair.second) { if ((lib->create_info.raytracing.flags & pair.second) == 0) { skip |= LogError( device, pair.first, "vkCreateRayTracingPipelinesKHR(): pCreateInfo[%" PRIu32 "].flags contains %s bit, but pCreateInfo[%" PRIu32 "].pLibraryInfo->pLibraries[%" PRIu32 "] was created without it.", i, string_VkPipelineCreateFlags(pair.second).c_str(), i, j); } } } } } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo, uint32_t *pExecutableCount, VkPipelineExecutablePropertiesKHR *pProperties) const { bool skip = false; skip |= ValidatePipelineExecutableInfo(device, nullptr, "vkGetPipelineExecutablePropertiesKHR", "VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270"); return skip; } bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, const char *caller_name, const char *feature_vuid) const { bool skip = false; if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) { skip |= LogError(device, feature_vuid, "%s(): called when pipelineExecutableInfo feature is not enabled.", caller_name); } // vkGetPipelineExecutablePropertiesKHR will not have struct to validate further if (pExecutableInfo) { auto pi = LvlInitStruct<VkPipelineInfoKHR>(); pi.pipeline = pExecutableInfo->pipeline; // We could probably cache this instead of fetching it every time uint32_t executable_count = 0; DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executable_count, NULL); if (pExecutableInfo->executableIndex >= executable_count) { skip |= LogError( pExecutableInfo->pipeline, "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275", "%s(): VkPipelineExecutableInfoKHR::executableIndex (%1u) must be less than the number of executables associated with " "the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiesKHR", caller_name, pExecutableInfo->executableIndex, executable_count); } } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pStatisticCount, VkPipelineExecutableStatisticKHR *pStatistics) const { bool skip = false; skip |= ValidatePipelineExecutableInfo(device, pExecutableInfo, "vkGetPipelineExecutableStatisticsKHR", "VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272"); const auto pipeline_state = Get<PIPELINE_STATE>(pExecutableInfo->pipeline); if (!(pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) { skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274", "vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the " "VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set"); } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR( VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR *pStatistics) const { bool skip = false; skip |= ValidatePipelineExecutableInfo(device, pExecutableInfo, "vkGetPipelineExecutableInternalRepresentationsKHR", "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipelineExecutableInfo-03276"); const auto pipeline_state = Get<PIPELINE_STATE>(pExecutableInfo->pipeline); if (!(pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) { skip |= 
LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278", "vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the " "VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set"); } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) const { return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo( this, pCreateInfo, IsExtEnabled(device_extensions.vk_khr_push_descriptor), phys_dev_ext_props.push_descriptor_props.maxPushDescriptors, IsExtEnabled(device_extensions.vk_ext_descriptor_indexing), &enabled_features.core12, &enabled_features.inline_uniform_block_features, &phys_dev_ext_props.inline_uniform_block_props, &enabled_features.ray_tracing_acceleration_structure_features, &device_extensions); } enum DSL_DESCRIPTOR_GROUPS { DSL_TYPE_SAMPLERS = 0, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK, DSL_TYPE_ACCELERATION_STRUCTURE, DSL_TYPE_ACCELERATION_STRUCTURE_NV, DSL_NUM_DESCRIPTOR_GROUPS }; // Used by PreCallValidateCreatePipelineLayout. // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage std::valarray<uint32_t> GetDescriptorCountMaxPerStage( const DeviceFeatures *enabled_features, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { // Identify active pipeline stages std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT, VK_SHADER_STAGE_COMPUTE_BIT}; if (enabled_features->core.geometryShader) { stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT); } if (enabled_features->core.tessellationShader) { stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT); } // Allow iteration over enum values std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = { DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK, DSL_TYPE_ACCELERATION_STRUCTURE, DSL_TYPE_ACCELERATION_STRUCTURE_NV, }; // Sum by layouts per stage, then pick max of stages per type std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages for (auto stage : stage_flags) { std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums for (const auto &dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) { switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += 
binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: // count one block per binding. descriptorCount is number of bytes stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++; break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: stage_sum[DSL_TYPE_ACCELERATION_STRUCTURE] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: stage_sum[DSL_TYPE_ACCELERATION_STRUCTURE_NV] += binding->descriptorCount; break; default: break; } } } } for (auto type : dsl_groups) { max_sum[type] = std::max(stage_sum[type], max_sum[type]); } } return max_sum; } // Used by PreCallValidateCreatePipelineLayout. // Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type. // Note: descriptors only count against the limit once even if used by multiple stages. std::map<uint32_t, uint32_t> GetDescriptorSum( const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { std::map<uint32_t, uint32_t> sum_by_type; for (const auto &dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (binding->descriptorCount > 0) { if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { // count one block per binding. 
descriptorCount is number of bytes sum_by_type[binding->descriptorType]++; } else { sum_by_type[binding->descriptorType] += binding->descriptorCount; } } } } return sum_by_type; } bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) const { bool skip = false; std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr); unsigned int push_descriptor_set_count = 0; { for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) { set_layouts[i] = Get<cvdescriptorset::DescriptorSetLayout>(pCreateInfo->pSetLayouts[i]); if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count; if (set_layouts[i]->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-04606", "vkCreatePipelineLayout(): pCreateInfo->pSetLayouts[%" PRIu32 "] was created with VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE bit.", i); } } } if (push_descriptor_set_count > 1) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293", "vkCreatePipelineLayout() Multiple push descriptor sets found."); } // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true); // Samplers if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorSamplers limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers); } // Uniform buffers if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUniformBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorUniformBuffers); } // Storage buffers if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorStorageBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorStorageBuffers); } // Sampled images if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03019" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorSampledImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages); } // Storage images if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorStorageImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages); } // Input attachments if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorInputAttachments limit (%d).", max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props.limits.maxPerStageDescriptorInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214" : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02212"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorInlineUniformBlocks limit (%d).", max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks); } if (max_descriptors_per_stage[DSL_TYPE_ACCELERATION_STRUCTURE] > phys_dev_ext_props.acc_structure_props.maxPerStageDescriptorUpdateAfterBindAccelerationStructures) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03572", "vkCreatePipelineLayout(): max per-stage acceleration structure bindings count (%" PRIu32 ") exceeds device " "maxPerStageDescriptorInlineUniformBlocks limit (%" PRIu32 ").", max_descriptors_per_stage[DSL_TYPE_ACCELERATION_STRUCTURE], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks); } // Total descriptors by type // std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true); // Samplers uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03028" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetSamplers limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSamplers); } // Uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic); } // Storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers); } // Dynamic storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic); } // Sampled images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03033" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetSampledImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSampledImages); } // Storage images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetStorageImages); } // Input attachments if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetInputAttachments limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments); } // Inline uniform blocks if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) { const char *vuid = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02216" : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02213"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetInlineUniformBlocks limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks); } // Acceleration structures NV if (sum_all_stages[VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV] > phys_dev_ext_props.ray_tracing_propsNV.maxDescriptorSetAccelerationStructures) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02381", "vkCreatePipelineLayout(): sum of acceleration structures NV bindings among all stages (%" PRIu32 ") exceeds device " "VkPhysicalDeviceRayTracingPropertiesNV::maxDescriptorSetAccelerationStructures limit (%" PRIu32 ").", sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.ray_tracing_propsNV.maxDescriptorSetAccelerationStructures); } if (IsExtEnabled(device_extensions.vk_ext_descriptor_indexing)) { // XXX TODO: replace with correct VU messages // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false); // Samplers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022", "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers); } // Uniform buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023", "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers); } // Storage buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024", "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers); } // Sampled images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025", "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES], 
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages); } // Storage images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026", "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages); } // Input attachments if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027", "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215", "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks); } // Total descriptors by type, summed across all pipeline stages // std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false); // Samplers sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036", "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSamplers limit (%d).", sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers); } // Uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037", "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038", "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).", 
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic); } // Storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039", "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers); } // Dynamic storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040", "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic); } // Sampled images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041", "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).", sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages); } // Storage images sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042", "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).", sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages); } // Input attachments if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043", "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments); } // Inline uniform blocks if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217", "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).", 
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks); } } if (IsExtEnabled(device_extensions.vk_ext_fragment_density_map2)) { uint32_t sum_subsampled_samplers = 0; for (const auto &dsl : set_layouts) { // find the number of subsampled samplers across all stages // NOTE: this does not use the GetDescriptorSum pattern because it needs the Get<SAMPLER_STATE> method if ((dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (binding->descriptorCount > 0) { if (((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) || (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)) && (binding->pImmutableSamplers != nullptr)) { for (uint32_t sampler_idx = 0; sampler_idx < binding->descriptorCount; sampler_idx++) { const auto state = Get<SAMPLER_STATE>(binding->pImmutableSamplers[sampler_idx]); if (state && (state->createInfo.flags & (VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT | VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT))) { sum_subsampled_samplers++; } } } } } } if (sum_subsampled_samplers > phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566", "vkCreatePipelineLayout(): sum of sampler bindings with flags containing " "VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT or " "VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT among all stages (%d) " "exceeds device maxDescriptorSetSubsampledSamplers limit (%d).", sum_subsampled_samplers, phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers); } } return skip; } bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) const { // Make sure sets being destroyed are not currently in-use if (disabled[object_in_use]) return false; bool skip = false; const auto pool = Get<DESCRIPTOR_POOL_STATE>(descriptorPool); if (pool && pool->InUse()) { skip |= LogError(descriptorPool, "VUID-vkResetDescriptorPool-descriptorPool-00313", "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer."); } return skip; } // Ensure the pool contains enough descriptors and descriptor sets to satisfy // an allocation request. Fills common_data with the total number of descriptors of each type required, // as well as DescriptorSetLayout ptrs used for later update. 
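// In pseudo-code, the capacity check performed here amounts to the following (a minimal sketch, not the actual helper): for each descriptor type T requested by pAllocateInfo->pSetLayouts, accumulate required[T] += binding.descriptorCount (where descriptorCount is a byte count, not a descriptor count, for inline uniform blocks); then flag an error if pAllocateInfo->descriptorSetCount exceeds the pool's remaining sets, or if required[T] exceeds the pool's remaining descriptors of type T.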
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets, void *ads_state_data) const { StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data); cvdescriptorset::AllocateDescriptorSetsData *ads_state = reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data); // All state checks for AllocateDescriptorSets is done in single function return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state); } bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) const { // Make sure that no sets being destroyed are in-flight bool skip = false; // First make sure sets being destroyed are not currently in-use for (uint32_t i = 0; i < count; ++i) { if (pDescriptorSets[i] != VK_NULL_HANDLE) { skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets"); } } const auto pool_state = Get<DESCRIPTOR_POOL_STATE>(descriptorPool); if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) { // Can't Free from a NON_FREE pool skip |= LogError(descriptorPool, "VUID-vkFreeDescriptorSets-descriptorPool-00312", "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); } return skip; } bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) const { // First thing to do is perform map look-ups. // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets // so we can't just do a single map look-up up-front, but do them individually in functions below // Now make call(s) that validate state, but don't perform state updates in this function // Note, here DescriptorSets is unique in that we don't yet have an instance. 
Using a helper function in the // namespace which will parse params and make calls into specific class instances return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, "vkUpdateDescriptorSets()"); } static bool UniqueImageViews(const VkRenderingInfoKHR* pRenderingInfo, VkImageView imageView) { bool unique_views = true; for (uint32_t i = 0; i < pRenderingInfo->colorAttachmentCount; ++i) { if (pRenderingInfo->pColorAttachments[i].imageView == imageView) { unique_views = false; } if (pRenderingInfo->pColorAttachments[i].resolveImageView == imageView) { unique_views = false; } } if (pRenderingInfo->pDepthAttachment) { if (pRenderingInfo->pDepthAttachment->imageView == imageView) { unique_views = false; } if (pRenderingInfo->pDepthAttachment->resolveImageView == imageView) { unique_views = false; } } if (pRenderingInfo->pStencilAttachment) { if (pRenderingInfo->pStencilAttachment->imageView == imageView) { unique_views = false; } if (pRenderingInfo->pStencilAttachment->resolveImageView == imageView) { unique_views = false; } } return unique_views; } bool CoreChecks::PreCallValidateCmdBeginRenderingKHR(VkCommandBuffer commandBuffer, const VkRenderingInfoKHR *pRenderingInfo) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!cb_state) return false; bool skip = false; if (!enabled_features.dynamic_rendering_features.dynamicRendering) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginRenderingKHR-dynamicRendering-06446", "vkCmdBeginRenderingKHR(): dynamicRendering is not enabled."); } if ((cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && ((pRenderingInfo->flags & VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginRenderingKHR-commandBuffer-06068", "vkCmdBeginRenderingKHR(): pRenderingInfo->flags must not include " "VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR in a secondary command buffer."); } if (pRenderingInfo->viewMask != 0 && pRenderingInfo->layerCount == 0) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-viewMask-06069", "vkCmdBeginRenderingKHR(): If viewMask is not 0 (%u), layerCount must not be 0 (%u)", pRenderingInfo->viewMask, pRenderingInfo->layerCount); } auto rendering_fragment_shading_rate_attachment_info = LvlFindInChain<VkRenderingFragmentShadingRateAttachmentInfoKHR>(pRenderingInfo->pNext); if (rendering_fragment_shading_rate_attachment_info && (rendering_fragment_shading_rate_attachment_info->imageView != VK_NULL_HANDLE)) { auto view_state = Get<IMAGE_VIEW_STATE>(rendering_fragment_shading_rate_attachment_info->imageView); if (pRenderingInfo->viewMask != 0) { uint32_t highest_view_bit = MostSignificantBit(pRenderingInfo->viewMask); if (view_state->create_info.subresourceRange.layerCount != 1 && view_state->create_info.subresourceRange.layerCount < highest_view_bit) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-imageView-06124", "vkCmdBeginRenderingKHR(): imageView must have a layerCount (%u) that is either equal to 1 or greater than or equal to the index of the most significant bit in viewMask (%u)", view_state->create_info.subresourceRange.layerCount, highest_view_bit); } } if (UniqueImageViews(pRenderingInfo, rendering_fragment_shading_rate_attachment_info->imageView) == false) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-imageView-06125", "vkCmdBeginRenderingKHR(): imageView or resolveImageView member of pDepthAttachment, pStencilAttachment, or any element of 
pColorAttachments must not equal VkRenderingFragmentShadingRateAttachmentInfoKHR->imageView"); } if (rendering_fragment_shading_rate_attachment_info->imageLayout != VK_IMAGE_LAYOUT_GENERAL && rendering_fragment_shading_rate_attachment_info->imageLayout != VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06147", "vkCmdBeginRenderingKHR(): VkRenderingFragmentShadingRateAttachmentInfoKHR->imageLayout (%s) must be VK_IMAGE_LAYOUT_GENERAL or VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR", string_VkImageLayout(rendering_fragment_shading_rate_attachment_info->imageLayout)); } if (!IsPowerOfTwo(rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width)) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06149", "vkCmdBeginRenderingKHR(): shadingRateAttachmentTexelSize.width (%u) must be a power of two", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width); } auto max_frs_attach_texel_width = phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width; if (rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width > max_frs_attach_texel_width) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06150", "vkCmdBeginRenderingKHR(): shadingRateAttachmentTexelSize.width (%u) must be less than or equal to maxFragmentShadingRateAttachmentTexelSize.width (%u)", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width, max_frs_attach_texel_width); } auto min_frs_attach_texel_width = phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width; if (rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width < min_frs_attach_texel_width) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06151", "vkCmdBeginRenderingKHR(): shadingRateAttachmentTexelSize.width (%u) must be greater than or equal to minFragmentShadingRateAttachmentTexelSize.width (%u)", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width, min_frs_attach_texel_width); } if (!IsPowerOfTwo(rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height)) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06152", "vkCmdBeginRenderingKHR(): shadingRateAttachmentTexelSize.height (%u) must be a power of two", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height); } auto max_frs_attach_texel_height = phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height; if (rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height > max_frs_attach_texel_height) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06153", "vkCmdBeginRenderingKHR(): shadingRateAttachmentTexelSize.height (%u) must be less than or equal to maxFragmentShadingRateAttachmentTexelSize.height (%u)", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height, max_frs_attach_texel_height); } auto min_frs_attach_texel_height = phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height; if 
(rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height < min_frs_attach_texel_height) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06154", "vkCmdBeginRenderingKHR(): shadingRateAttachmentTexelSize.height (%u) must be greater than or equal to minFragmentShadingRateAttachmentTexelSize.height (%u)", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height, min_frs_attach_texel_height); } auto max_frs_attach_texel_aspect_ratio = phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio; if ((rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width / rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height) > max_frs_attach_texel_aspect_ratio) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06155", "vkCmdBeginRenderingKHR(): the quotient of shadingRateAttachmentTexelSize.width (%u) and shadingRateAttachmentTexelSize.height (%u) must be less than or equal to maxFragmentShadingRateAttachmentTexelSizeAspectRatio (%u)", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width, rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height, max_frs_attach_texel_aspect_ratio); } if ((rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height / rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width) > max_frs_attach_texel_aspect_ratio) { skip |= LogError(commandBuffer, "VUID-VkRenderingFragmentShadingRateAttachmentInfoKHR-imageView-06156", "vkCmdBeginRenderingKHR(): the quotient of shadingRateAttachmentTexelSize.height (%u) and shadingRateAttachmentTexelSize.width (%u) must be less than or equal to maxFragmentShadingRateAttachmentTexelSizeAspectRatio (%u)", rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.height, rendering_fragment_shading_rate_attachment_info->shadingRateAttachmentTexelSize.width, max_frs_attach_texel_aspect_ratio); } } if (!(IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples) || IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples))) { uint32_t first_sample_count_attachment = VK_ATTACHMENT_UNUSED; for (uint32_t j = 0; j < pRenderingInfo->colorAttachmentCount; ++j) { if (pRenderingInfo->pColorAttachments[j].imageView != VK_NULL_HANDLE) { auto image_view = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pColorAttachments[j].imageView); first_sample_count_attachment = (j == 0u) ? 
static_cast<uint32_t>(image_view->samples) : first_sample_count_attachment; if (first_sample_count_attachment != image_view->samples) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-imageView-06070", "vkCmdBeginRenderingKHR(): Color attachment ref %u has sample count %s, whereas first color " "attachment ref has " "sample count %u.", j, string_VkSampleCountFlagBits(image_view->samples), (first_sample_count_attachment)); } } } } if (!(pRenderingInfo->colorAttachmentCount <= phys_dev_props.limits.maxColorAttachments)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06106", "vkCmdBeginRenderingKHR(): colorAttachmentCount (%u) must be less than or equal to " "VkPhysicalDeviceLimits::maxColorAttachments (%u)", pRenderingInfo->colorAttachmentCount, phys_dev_props.limits.maxColorAttachments); } auto fragment_density_map_attachment_info = LvlFindInChain<VkRenderingFragmentDensityMapAttachmentInfoEXT>(pRenderingInfo->pNext); if (fragment_density_map_attachment_info) { if (UniqueImageViews(pRenderingInfo, fragment_density_map_attachment_info->imageView) == false) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-imageView-06116", "vkCmdBeginRenderingKHR(): imageView or resolveImageView member of pDepthAttachment, pStencilAttachment, or any element of pColorAttachments must not equal VkRenderingFragmentDensityMapAttachmentInfoEXT->imageView"); } } if ((enabled_features.core11.multiview == VK_FALSE) && (pRenderingInfo->viewMask != 0)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-multiview-06127", "vkCmdBeginRenderingKHR(): If the multiview feature is not enabled, viewMask must be 0 (%u)", pRenderingInfo->viewMask); } auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderingInfo->pNext); if (!chained_device_group_struct || (chained_device_group_struct && chained_device_group_struct->deviceRenderAreaCount == 0)) { if (pRenderingInfo->renderArea.offset.x < 0) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06077", "vkCmdBeginRenderingKHR(): renderArea.offset.x is %d and must be greater than or equal to 0", pRenderingInfo->renderArea.offset.x); } if (pRenderingInfo->renderArea.offset.y < 0) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06078", "vkCmdBeginRenderingKHR(): renderArea.offset.y is %d and must be greater than or equal to 0", pRenderingInfo->renderArea.offset.y); } for (uint32_t j = 0; j < pRenderingInfo->colorAttachmentCount; ++j) { if (pRenderingInfo->pColorAttachments[j].imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pColorAttachments[j].imageView); IMAGE_STATE *image_state = image_view_state->image_state.get(); if (!(image_state->createInfo.extent.width >= pRenderingInfo->renderArea.offset.x + pRenderingInfo->renderArea.extent.width)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06079", "vkCmdBeginRenderingKHR(): width of the pColorAttachments[%u].imageView: %u must be greater than or equal to " "renderArea.offset.x + renderArea.extent.width", j, image_state->createInfo.extent.width); } if (!(image_state->createInfo.extent.height >= pRenderingInfo->renderArea.offset.y + pRenderingInfo->renderArea.extent.height)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06080", "vkCmdBeginRenderingKHR(): height of the pColorAttachments[%u].imageView: %u must be greater than or equal to " "renderArea.offset.y + renderArea.extent.height", j, image_state->createInfo.extent.height); } } } if (pRenderingInfo->pDepthAttachment != 
VK_NULL_HANDLE && pRenderingInfo->pDepthAttachment->imageView != VK_NULL_HANDLE) { auto depth_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pDepthAttachment->imageView); IMAGE_STATE *image_state = depth_view_state->image_state.get(); if (!(image_state->createInfo.extent.width >= pRenderingInfo->renderArea.offset.x + pRenderingInfo->renderArea.extent.width)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06079", "vkCmdBeginRenderingKHR(): width of the pDepthAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.x + renderArea.extent.width", image_state->createInfo.extent.width); } if (!(image_state->createInfo.extent.height >= pRenderingInfo->renderArea.offset.y + pRenderingInfo->renderArea.extent.height)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06080", "vkCmdBeginRenderingKHR(): height of the pDepthAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.y + renderArea.extent.height", image_state->createInfo.extent.height); } } if (pRenderingInfo->pStencilAttachment != VK_NULL_HANDLE && pRenderingInfo->pStencilAttachment->imageView != VK_NULL_HANDLE) { auto stencil_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pStencilAttachment->imageView); IMAGE_STATE *image_state = stencil_view_state->image_state.get(); if (!(image_state->createInfo.extent.width >= pRenderingInfo->renderArea.offset.x + pRenderingInfo->renderArea.extent.width)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06079", "vkCmdBeginRenderingKHR(): width of the pStencilAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.x + renderArea.extent.width", image_state->createInfo.extent.width); } if (!(image_state->createInfo.extent.height >= pRenderingInfo->renderArea.offset.y + pRenderingInfo->renderArea.extent.height)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06080", "vkCmdBeginRenderingKHR(): height of the pStencilAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.y + renderArea.extent.height", image_state->createInfo.extent.height); } } } if (chained_device_group_struct) { for (uint32_t deviceRenderAreaIndex = 0; deviceRenderAreaIndex < chained_device_group_struct->deviceRenderAreaCount; ++deviceRenderAreaIndex) { auto offset_x = chained_device_group_struct->pDeviceRenderAreas[deviceRenderAreaIndex].offset.x; auto width = chained_device_group_struct->pDeviceRenderAreas[deviceRenderAreaIndex].extent.width; if (!(offset_x >= 0)) { skip |= LogError(commandBuffer, "VUID-VkDeviceGroupRenderPassBeginInfo-offset-06166", "vkCmdBeginRenderingKHR(): pDeviceRenderAreas[%u].offset.x: %d must be greater than or equal to 0", deviceRenderAreaIndex, offset_x); } if ((offset_x + width) > phys_dev_props.limits.maxFramebufferWidth) { skip |= LogError(commandBuffer, "VUID-VkDeviceGroupRenderPassBeginInfo-offset-06168", "vkCmdBeginRenderingKHR(): pDeviceRenderAreas[%" PRIu32 "] sum of offset.x (%" PRId32 ") and extent.width (%" PRIu32 ") is greater than maxFramebufferWidth (%" PRIu32 ").", deviceRenderAreaIndex, offset_x, width, phys_dev_props.limits.maxFramebufferWidth); } auto offset_y = chained_device_group_struct->pDeviceRenderAreas[deviceRenderAreaIndex].offset.y; auto height = chained_device_group_struct->pDeviceRenderAreas[deviceRenderAreaIndex].extent.height; if (!(offset_y >= 0)) { skip |= LogError(commandBuffer, "VUID-VkDeviceGroupRenderPassBeginInfo-offset-06167", "vkCmdBeginRenderingKHR(): pDeviceRenderAreas[%u].offset.y: %d must be greater than or equal to 0", deviceRenderAreaIndex, 
offset_y); } if ((offset_y + height) > phys_dev_props.limits.maxFramebufferHeight) { skip |= LogError(commandBuffer, "VUID-VkDeviceGroupRenderPassBeginInfo-offset-06169", "vkCmdBeginRenderingKHR(): pDeviceRenderAreas[%" PRIu32 "] sum of offset.y (%" PRId32 ") and extent.height (%" PRIu32 ") is greater than maxFramebufferHeight (%" PRIu32 ").", deviceRenderAreaIndex, offset_y, height, phys_dev_props.limits.maxFramebufferHeight); } for (uint32_t j = 0; j < pRenderingInfo->colorAttachmentCount; ++j) { if (pRenderingInfo->pColorAttachments[j].imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pColorAttachments[j].imageView); IMAGE_STATE *image_state = image_view_state->image_state.get(); if (!(image_state->createInfo.extent.width >= offset_x + width)) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06083", "vkCmdBeginRenderingKHR(): width of the pColorAttachments[%u].imageView: %u must be greater than or equal to " "renderArea.offset.x + renderArea.extent.width", j, image_state->createInfo.extent.width); } if (!(image_state->createInfo.extent.height >= offset_y + height)) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06084", "vkCmdBeginRenderingKHR(): height of the pColorAttachments[%u].imageView: %u must be greater than or equal to " "renderArea.offset.y + renderArea.extent.height", j, image_state->createInfo.extent.height); } } } if (pRenderingInfo->pDepthAttachment != VK_NULL_HANDLE && pRenderingInfo->pDepthAttachment->imageView != VK_NULL_HANDLE) { auto depth_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pDepthAttachment->imageView); IMAGE_STATE *image_state = depth_view_state->image_state.get(); if (!(image_state->createInfo.extent.width >= offset_x + width)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06083", "vkCmdBeginRenderingKHR(): width of the pDepthAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.x + renderArea.extent.width", image_state->createInfo.extent.width); } if (!(image_state->createInfo.extent.height >= offset_y + height)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06084", "vkCmdBeginRenderingKHR(): height of the pDepthAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.y + renderArea.extent.height", image_state->createInfo.extent.height); } } if (pRenderingInfo->pStencilAttachment != VK_NULL_HANDLE && pRenderingInfo->pStencilAttachment->imageView != VK_NULL_HANDLE) { auto stencil_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pStencilAttachment->imageView); IMAGE_STATE *image_state = stencil_view_state->image_state.get(); if (!(image_state->createInfo.extent.width >= offset_x + width)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06083", "vkCmdBeginRenderingKHR(): width of the pStencilAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.x + renderArea.extent.width", image_state->createInfo.extent.width); } if (!(image_state->createInfo.extent.height >= offset_y + height)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pNext-06084", "vkCmdBeginRenderingKHR(): height of the pStencilAttachment->imageView: %u must be greater than or equal to " "renderArea.offset.y + renderArea.extent.height", image_state->createInfo.extent.height); } } } } if (pRenderingInfo->pDepthAttachment != NULL && pRenderingInfo->pStencilAttachment != NULL) { if (pRenderingInfo->pDepthAttachment->imageView != VK_NULL_HANDLE && pRenderingInfo->pStencilAttachment->imageView != VK_NULL_HANDLE) { if (!(pRenderingInfo->pDepthAttachment->imageView == pRenderingInfo->pStencilAttachment->imageView)) { skip |= LogError(commandBuffer, 
"VUID-VkRenderingInfoKHR-pDepthAttachment-06085", "vkCmdBeginRenderingKHR(): imageView of pDepthAttachment and pStencilAttachment must be the same"); } if ((phys_dev_props_core12.independentResolveNone == VK_FALSE) && (pRenderingInfo->pDepthAttachment->resolveMode != pRenderingInfo->pStencilAttachment->resolveMode)) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pDepthAttachment-06104", "vkCmdBeginRenderingKHR(): The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.", pRenderingInfo->pDepthAttachment->resolveMode, pRenderingInfo->pStencilAttachment->resolveMode); } if ((phys_dev_props_core12.independentResolve == VK_FALSE) && (pRenderingInfo->pDepthAttachment->resolveMode != VK_RESOLVE_MODE_NONE) && (pRenderingInfo->pStencilAttachment->resolveMode != VK_RESOLVE_MODE_NONE) && (pRenderingInfo->pStencilAttachment->resolveMode != pRenderingInfo->pDepthAttachment->resolveMode)) { skip |= LogError(device, "VUID-VkRenderingInfoKHR-pDepthAttachment-06105", "vkCmdBeginRenderingKHR(): The values of depthResolveMode (%u) and stencilResolveMode (%u) must " "be identical, or " "one of them must be %u.", pRenderingInfo->pDepthAttachment->resolveMode, pRenderingInfo->pStencilAttachment->resolveMode, VK_RESOLVE_MODE_NONE); } } if (pRenderingInfo->pDepthAttachment->resolveMode != VK_RESOLVE_MODE_NONE && pRenderingInfo->pStencilAttachment->resolveMode != VK_RESOLVE_MODE_NONE) { if (!(pRenderingInfo->pDepthAttachment->resolveImageView == pRenderingInfo->pStencilAttachment->resolveImageView)) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pDepthAttachment-06086", "vkCmdBeginRenderingKHR(): resolveImageView of pDepthAttachment and pStencilAttachment must be the same"); } } } for (uint32_t j = 0; j < pRenderingInfo->colorAttachmentCount; ++j) { skip |= ValidateRenderingAttachmentInfoKHR(commandBuffer, &pRenderingInfo->pColorAttachments[j]); if (pRenderingInfo->pColorAttachments[j].imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pColorAttachments[j].imageView); IMAGE_STATE *image_state = image_view_state->image_state.get(); if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06087", "vkCmdBeginRenderingKHR(): VkRenderingInfoKHR->colorAttachment[%u] must have been created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT", j); } if (pRenderingInfo->pColorAttachments[j].imageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || pRenderingInfo->pColorAttachments[j].imageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06090", "vkCmdBeginRenderingKHR(): imageLayout must not be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL or " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL"); } if (pRenderingInfo->pColorAttachments[j].resolveMode != VK_RESOLVE_MODE_NONE) { if (pRenderingInfo->pColorAttachments[j].resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || pRenderingInfo->pColorAttachments[j].resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06091", "vkCmdBeginRenderingKHR(): resolveImageLayout must not be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL or " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL"); } } if (pRenderingInfo->pColorAttachments[j].imageLayout == 
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL || pRenderingInfo->pColorAttachments[j].imageLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06096", "vkCmdBeginRenderingKHR(): imageLayout must not be " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL " "or VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL"); } if (pRenderingInfo->pColorAttachments[j].resolveMode != VK_RESOLVE_MODE_NONE) { if (pRenderingInfo->pColorAttachments[j].resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL || pRenderingInfo->pColorAttachments[j].resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06097", "vkCmdBeginRenderingKHR(): resolveImageLayout must not be " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL or " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL"); } } if (pRenderingInfo->pColorAttachments[j].imageLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL || pRenderingInfo->pColorAttachments[j].imageLayout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL || pRenderingInfo->pColorAttachments[j].imageLayout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL || pRenderingInfo->pColorAttachments[j].imageLayout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06100", "vkCmdBeginRenderingKHR(): imageLayout must not be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL" " or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL" " or VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL" " or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL"); } if (pRenderingInfo->pColorAttachments[j].resolveMode != VK_RESOLVE_MODE_NONE) { if (pRenderingInfo->pColorAttachments[j].resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL || pRenderingInfo->pColorAttachments[j].resolveImageLayout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-colorAttachmentCount-06101", "vkCmdBeginRenderingKHR(): resolveImageLayout must not be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or " "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL"); } } } } if (pRenderingInfo->pDepthAttachment != NULL) { skip |= ValidateRenderingAttachmentInfoKHR(commandBuffer, pRenderingInfo->pDepthAttachment); if (pRenderingInfo->pDepthAttachment->imageView != VK_NULL_HANDLE) { auto depth_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pDepthAttachment->imageView); IMAGE_STATE *image_state = depth_view_state->image_state.get(); if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pDepthAttachment-06088", "vkCmdBeginRenderingKHR(): depth image must have been created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT"); } if (pRenderingInfo->pDepthAttachment->imageLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pDepthAttachment-06092", "vkCmdBeginRenderingKHR(): imageLayout must not be VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL"); } if (pRenderingInfo->pDepthAttachment->resolveMode != VK_RESOLVE_MODE_NONE) { if (pRenderingInfo->pDepthAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pDepthAttachment-06093", 
"vkCmdBeginRenderingKHR(): resolveImageLayout must not be VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL"); } if (pRenderingInfo->pDepthAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pDepthAttachment-06098", "vkCmdBeginRenderingKHR(): resolveImageLayout must not be " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL"); } if (!(pRenderingInfo->pDepthAttachment->resolveMode & phys_dev_props_core12.supportedDepthResolveModes)) { skip |= LogError(device, "VUID-VkRenderingInfoKHR-pDepthAttachment-06102", "vkCmdBeginRenderingKHR(): Includes a resolveMode structure with invalid mode=%u.", pRenderingInfo->pDepthAttachment->resolveMode); } } } } if (pRenderingInfo->pStencilAttachment != NULL) { skip |= ValidateRenderingAttachmentInfoKHR(commandBuffer, pRenderingInfo->pStencilAttachment); if (pRenderingInfo->pStencilAttachment->imageView != VK_NULL_HANDLE) { auto stencil_view_state = Get<IMAGE_VIEW_STATE>(pRenderingInfo->pStencilAttachment->imageView); IMAGE_STATE *image_state = stencil_view_state->image_state.get(); if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pStencilAttachment-06089", "vkCmdBeginRenderingKHR(): stencil image must have been created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT"); } if (pRenderingInfo->pStencilAttachment->imageLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pStencilAttachment-06094", "vkCmdBeginRenderingKHR(): imageLayout must not be VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL"); } if (pRenderingInfo->pStencilAttachment->resolveMode != VK_RESOLVE_MODE_NONE) { if (pRenderingInfo->pStencilAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { skip |= LogError( commandBuffer, "VUID-VkRenderingInfoKHR-pStencilAttachment-06095", "vkCmdBeginRenderingKHR(): resolveImageLayout must not be VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL"); } if (pRenderingInfo->pStencilAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL) { skip |= LogError(commandBuffer, "VUID-VkRenderingInfoKHR-pStencilAttachment-06099", "vkCmdBeginRenderingKHR(): resolveImageLayout must not be " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL"); } if (!(pRenderingInfo->pStencilAttachment->resolveMode & phys_dev_props_core12.supportedStencilResolveModes)) { skip |= LogError(device, "VUID-VkRenderingInfoKHR-pStencilAttachment-06103", "vkCmdBeginRenderingKHR(): Includes a resolveMode structure with invalid mode (%s).", string_VkResolveModeFlagBits(pRenderingInfo->pStencilAttachment->resolveMode)); } } } } return skip; } bool CoreChecks::ValidateRenderingAttachmentInfoKHR(VkCommandBuffer commandBuffer, const VkRenderingAttachmentInfoKHR *pAttachment) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!cb_state) return false; bool skip = false; if (pAttachment->imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>(pAttachment->imageView); if ((!FormatIsSINT(image_view_state->create_info.format) && !FormatIsUINT(image_view_state->create_info.format)) && !(pAttachment->resolveMode == VK_RESOLVE_MODE_NONE || pAttachment->resolveMode == VK_RESOLVE_MODE_AVERAGE_BIT)) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06129", "vkCmdBeginRenderingKHR(): Current resolve mode (%s) must be VK_RESOLVE_MODE_NONE or 
" "VK_RESOLVE_MODE_AVERAGE_BIT for non-integar formats (%s)", string_VkResolveModeFlags(pAttachment->resolveMode).c_str(), string_VkFormat(image_view_state->create_info.format)); } if ((FormatIsSINT(image_view_state->create_info.format) || FormatIsUINT(image_view_state->create_info.format)) && !(pAttachment->resolveMode == VK_RESOLVE_MODE_NONE || pAttachment->resolveMode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT)) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06130", "vkCmdBeginRenderingKHR(): Current resolve mode (%s) must be VK_RESOLVE_MODE_NONE or " "VK_RESOLVE_MODE_SAMPLE_ZERO_BIT for integar formats (%s)", string_VkResolveModeFlags(pAttachment->resolveMode).c_str(), string_VkFormat(image_view_state->create_info.format)); } if (pAttachment->imageLayout == VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06143", "vkCmdBeginRenderingKHR(): layout must not be VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR"); } if (pAttachment->imageLayout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06140", "vkCmdBeginRenderingKHR(): layout must not be VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT"); } if (pAttachment->resolveMode != VK_RESOLVE_MODE_NONE && image_view_state->samples == VK_SAMPLE_COUNT_1_BIT) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06132", "Image sample count must not have a VK_SAMPLE_COUNT_1_BIT for Resolve Mode %s", string_VkResolveModeFlags(pAttachment->resolveMode).c_str()); } auto resolve_view_state = Get<IMAGE_VIEW_STATE>(pAttachment->resolveImageView); if (pAttachment->resolveMode != VK_RESOLVE_MODE_NONE && resolve_view_state->samples != VK_SAMPLE_COUNT_1_BIT) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06133", "resolveImageView sample count must have a VK_SAMPLE_COUNT_1_BIT for Resolve Mode %s", string_VkResolveModeFlags(pAttachment->resolveMode).c_str()); } if (pAttachment->resolveMode != VK_RESOLVE_MODE_NONE) { if (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06144", "vlCmdBeginRenderingKHR(): resolveImageLayout must not be VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR"); } if (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06141", "vlCmdBeginRenderingKHR(): resolveImageLayout must not be VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT"); } if (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06142", "vlCmdBeginRenderingKHR(): resolveImageLayout must not be VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR"); } if (image_view_state->create_info.format != resolve_view_state->create_info.format) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06134", "resolveImageView format (%u) and ImageView format (%u) must have the same VkFormat", resolve_view_state->create_info.format, image_view_state->create_info.format); } if (((pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_UNDEFINED) || (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (pAttachment->resolveImageLayout == 
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) || (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) || (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) || (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_PREINITIALIZED))) { skip |= LogError( commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06136", "resolveImageLayout (%s) must not be VK_IMAGE_LAYOUT_UNDEFINED, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, or VK_IMAGE_LAYOUT_PREINITIALIZED", string_VkImageLayout(pAttachment->resolveImageLayout)); } if (((pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL) || (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL))) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06137", "resolveImageLayout (%s) must not be VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL", string_VkImageLayout(pAttachment->resolveImageLayout)); } } if ((pAttachment->imageLayout == VK_IMAGE_LAYOUT_UNDEFINED) || (pAttachment->imageLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) || (pAttachment->imageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) || (pAttachment->imageLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) || (pAttachment->imageLayout == VK_IMAGE_LAYOUT_PREINITIALIZED)) { skip |= LogError( commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06135", "layout (%s) must not be VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, or VK_IMAGE_LAYOUT_PREINITIALIZED", string_VkImageLayout(pAttachment->imageLayout)); } if (pAttachment->imageLayout == VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06138", "layout (%s) must not be VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV", string_VkImageLayout(pAttachment->imageLayout)); } if (pAttachment->resolveImageLayout == VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV) { skip |= LogError(commandBuffer, "VUID-VkRenderingAttachmentInfoKHR-imageView-06139", "resolveImageLayout (%s) must not be VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV", string_VkImageLayout(pAttachment->resolveImageLayout)); } } return skip; } bool CoreChecks::PreCallValidateCmdEndRenderingKHR(VkCommandBuffer commandBuffer) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!cb_state) return false; bool skip = false; if (cb_state->activeRenderPass) { if ((cb_state->activeRenderPass->use_dynamic_rendering == false) && (cb_state->activeRenderPass->use_dynamic_rendering_inherited == false)) { skip |= LogError( commandBuffer, "VUID-vkCmdEndRenderingKHR-None-06161", "Calling vkCmdEndRenderingKHR() in a render pass instance that was not begun with vkCmdBeginRenderingKHR()."); } if (cb_state->activeRenderPass->use_dynamic_rendering_inherited == true) { skip |= LogError(commandBuffer, "VUID-vkCmdEndRenderingKHR-commandBuffer-06162", "Calling vkCmdEndRenderingKHR() in a render pass instance that was not begun in this command buffer."); } } return skip; } bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!cb_state) return false; bool skip = false; if (cb_state->InUse()) { skip |= LogError(commandBuffer, 
"VUID-vkBeginCommandBuffer-commandBuffer-00049", "Calling vkBeginCommandBuffer() on active %s before it has completed. You must check " "command buffer fence before this call.", report_data->FormatHandle(commandBuffer).c_str()); } if (cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) { // Primary Command Buffer const VkCommandBufferUsageFlags invalid_usage = (VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT); if ((pBeginInfo->flags & invalid_usage) == invalid_usage) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-02840", "vkBeginCommandBuffer(): Primary %s can't have both VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(commandBuffer).c_str()); } } else { // Secondary Command Buffer const VkCommandBufferInheritanceInfo *info = pBeginInfo->pInheritanceInfo; if (!info) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00051", "vkBeginCommandBuffer(): Secondary %s must have inheritance info.", report_data->FormatHandle(commandBuffer).c_str()); } else { auto p_inherited_rendering_info = LvlFindInChain<VkCommandBufferInheritanceRenderingInfoKHR>(info->pNext); if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { const auto framebuffer = Get<FRAMEBUFFER_STATE>(info->framebuffer); if (framebuffer) { if (framebuffer->createInfo.renderPass != info->renderPass) { const auto render_pass = Get<RENDER_PASS_STATE>(info->renderPass); // renderPass that framebuffer was created with must be compatible with local renderPass skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer", render_pass.get(), "vkBeginCommandBuffer()", "VUID-VkCommandBufferBeginInfo-flags-00055"); } } if (info->renderPass != VK_NULL_HANDLE) { const auto render_pass = Get<RENDER_PASS_STATE>(info->renderPass); if (!render_pass) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-06000", "vkBeginCommandBuffer(): Renderpass must be a valid VkRenderPass"); } else { if (info->subpass >= render_pass->createInfo.subpassCount) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-06001", "vkBeginCommandBuffer(): Subpass member of pInheritanceInfo must be a valid subpass " "index within pInheritanceInfo->renderPass"); } } } else { if (!p_inherited_rendering_info) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-06002", "vkBeginCommandBuffer():The pNext chain of pInheritanceInfo must include a " "VkCommandBufferInheritanceRenderingInfoKHR structure"); } } } if (p_inherited_rendering_info) { auto p_attachment_sample_count_info_amd = LvlFindInChain<VkAttachmentSampleCountInfoAMD>(info->pNext); if (p_attachment_sample_count_info_amd && p_attachment_sample_count_info_amd->colorAttachmentCount != p_inherited_rendering_info->colorAttachmentCount) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-06003", "vkBeginCommandBuffer(): VkAttachmentSampleCountInfo{AMD,NV}->colorAttachmentCount[%u] must equal VkCommandBufferInheritanceRenderingInfoKHR->colorAttachmentCount[%u]", p_attachment_sample_count_info_amd->colorAttachmentCount, p_inherited_rendering_info->colorAttachmentCount); } const VkSampleCountFlags AllVkSampleCountFlagBits = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT | VK_SAMPLE_COUNT_16_BIT | VK_SAMPLE_COUNT_32_BIT | VK_SAMPLE_COUNT_64_BIT; if 
((p_inherited_rendering_info->colorAttachmentCount != 0) && (p_inherited_rendering_info->rasterizationSamples & AllVkSampleCountFlagBits) == 0) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-colorAttachmentCount-06004", "vkBeginCommandBuffer(): VkCommandBufferInheritanceRenderingInfoKHR->colorAttachmentCount (%" PRIu32 ") is not 0, rasterizationSamples (%s) must be a valid VkSampleCountFlagBits value", p_inherited_rendering_info->colorAttachmentCount, string_VkSampleCountFlagBits(p_inherited_rendering_info->rasterizationSamples)); } if ((enabled_features.core.variableMultisampleRate == false) && (p_inherited_rendering_info->rasterizationSamples & AllVkSampleCountFlagBits) == 0) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-variableMultisampleRate-06005", "vkBeginCommandBuffer(): If the variableMultisampleRate feature is not enabled, rasterizationSamples (%s) must be a valid VkSampleCountFlagBits", string_VkSampleCountFlagBits(p_inherited_rendering_info->rasterizationSamples)); } const VkFormatFeatureFlags2KHR valid_color_format = VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR; for (uint32_t i = 0; i < p_inherited_rendering_info->colorAttachmentCount; ++i) { if (p_inherited_rendering_info->pColorAttachmentFormats != nullptr) { const VkFormat attachment_format = p_inherited_rendering_info->pColorAttachmentFormats[i]; if (attachment_format != VK_FORMAT_UNDEFINED) { const VkFormatFeatureFlags2KHR potential_format_features = GetPotentialFormatFeatures(attachment_format); if ((potential_format_features & valid_color_format) == 0) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-pColorAttachmentFormats-06006", "vkBeginCommandBuffer(): VkCommandBufferInheritanceRenderingInfoKHR->pColorAttachmentFormats[%u] (%s) must be a format with potential format features that include VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT", i, string_VkFormat(attachment_format)); } } } } const VkFormatFeatureFlags2KHR valid_depth_stencil_format = VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR; const VkFormat depth_format = p_inherited_rendering_info->depthAttachmentFormat; if (depth_format != VK_FORMAT_UNDEFINED) { const VkFormatFeatureFlags2KHR potential_format_features = GetPotentialFormatFeatures(depth_format); if ((potential_format_features & valid_depth_stencil_format) == 0) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-depthAttachmentFormat-06007", "vkBeginCommandBuffer(): VkCommandBufferInheritanceRenderingInfoKHR->depthAttachmentFormat (%s) must be a format with potential format features that include VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT", string_VkFormat(depth_format)); } } const VkFormat stencil_format = p_inherited_rendering_info->stencilAttachmentFormat; if (stencil_format != VK_FORMAT_UNDEFINED) { const VkFormatFeatureFlags2KHR potential_format_features = GetPotentialFormatFeatures(stencil_format); if ((potential_format_features & valid_depth_stencil_format) == 0) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-stencilAttachmentFormat-06199", "vkBeginCommandBuffer(): VkCommandBufferInheritanceRenderingInfoKHR->stencilAttachmentFormat (%s) must be a format with potential format features that include VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT", string_VkFormat(stencil_format)); } } if ((depth_format != VK_FORMAT_UNDEFINED && stencil_format != VK_FORMAT_UNDEFINED) && (depth_format != stencil_format)) { skip |= 
LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-depthAttachmentFormat-06200", "vkBeginCommandBuffer(): VkCommandBufferInheritanceRenderingInfoKHR->depthAttachmentFormat (%s) must equal VkCommandBufferInheritanceRenderingInfoKHR->stencilAttachmentFormat (%s)", string_VkFormat(depth_format), string_VkFormat(stencil_format)); } if ((enabled_features.core11.multiview == VK_FALSE) && (p_inherited_rendering_info->viewMask != 0)) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-multiview-06008", "vkBeginCommandBuffer(): If the multiview feature is not enabled, viewMask must be 0 (%u)", p_inherited_rendering_info->viewMask); } if (MostSignificantBit(p_inherited_rendering_info->viewMask) >= phys_dev_props_core11.maxMultiviewViewCount) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceRenderingInfoKHR-viewMask-06009", "vkBeginCommandBuffer(): Most significant bit VkCommandBufferInheritanceRenderingInfoKHR->viewMask(%u) must be less maxMultiviewViewCount(%u)", p_inherited_rendering_info->viewMask, phys_dev_props_core11.maxMultiviewViewCount); } } } if (info) { if ((info->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) && (info->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00052", "vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if " "occulusionQuery is disabled or the device does not support precise occlusion queries.", report_data->FormatHandle(commandBuffer).c_str()); } auto p_inherited_viewport_scissor_info = LvlFindInChain<VkCommandBufferInheritanceViewportScissorInfoNV>(info->pNext); if (p_inherited_viewport_scissor_info != nullptr && p_inherited_viewport_scissor_info->viewportScissor2D) { if (!enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04782", "vkBeginCommandBuffer(): inheritedViewportScissor2D feature not enabled."); } if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04786", "vkBeginCommandBuffer(): Secondary %s must be recorded with the" "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT if viewportScissor2D is VK_TRUE.", report_data->FormatHandle(commandBuffer).c_str()); } if (p_inherited_viewport_scissor_info->viewportDepthCount == 0) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04784", "vkBeginCommandBuffer(): " "If viewportScissor2D is VK_TRUE, then viewportDepthCount must be greater than 0."); } } if (info->renderPass != VK_NULL_HANDLE) { const auto render_pass = Get<RENDER_PASS_STATE>(info->renderPass); if (render_pass) { if (info->subpass >= render_pass->createInfo.subpassCount) { skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-00054", "vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is " "less than the number of subpasses (%d).", report_data->FormatHandle(commandBuffer).c_str(), info->subpass, render_pass->createInfo.subpassCount); } } } } } if (CB_RECORDING == cb_state->state) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049", "vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. 
Must first call " "vkEndCommandBuffer().", report_data->FormatHandle(commandBuffer).c_str()); } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) { VkCommandPool cmd_pool = cb_state->createInfo.commandPool; const auto *pool = cb_state->command_pool; if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) { LogObjectList objlist(commandBuffer); objlist.add(cmd_pool); skip |= LogError(objlist, "VUID-vkBeginCommandBuffer-commandBuffer-00050", "Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from " "%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str()); } } auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext); if (chained_device_group_struct) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, commandBuffer, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106"); skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, commandBuffer, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107"); } return skip; } bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!cb_state) return false; bool skip = false; if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) || !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // This needs spec clarification to update valid usage, see comments in PR: // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165 skip |= InsideRenderPass(cb_state.get(), "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060"); } if (cb_state->state == CB_INVALID_COMPLETE || cb_state->state == CB_INVALID_INCOMPLETE) { skip |= ReportInvalidCommandBuffer(cb_state.get(), "vkEndCommandBuffer()"); } else if (CB_RECORDING != cb_state->state) { skip |= LogError( commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00059", "vkEndCommandBuffer(): Cannot call End on %s when not in the RECORDING state. 
Must first call vkBeginCommandBuffer().", report_data->FormatHandle(commandBuffer).c_str()); } for (const auto &query : cb_state->activeQueries) { skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00061", "vkEndCommandBuffer(): Ending command buffer with in progress query: %s, query %d.", report_data->FormatHandle(query.pool).c_str(), query.query); } if (cb_state->conditional_rendering_active) { skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-None-01978", "vkEndCommandBuffer(): Ending command buffer with active conditional rendering."); } return skip; } bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!cb_state) return false; VkCommandPool cmd_pool = cb_state->createInfo.commandPool; const auto *pool = cb_state->command_pool; if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) { LogObjectList objlist(commandBuffer); objlist.add(cmd_pool); skip |= LogError(objlist, "VUID-vkResetCommandBuffer-commandBuffer-00046", "vkResetCommandBuffer(): Attempt to reset %s created from %s that does NOT have the " "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str()); } skip |= CheckCommandBufferInFlight(cb_state.get(), "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045"); return skip; } static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) { switch (pipelineBindPoint) { case VK_PIPELINE_BIND_POINT_GRAPHICS: return "graphics"; case VK_PIPELINE_BIND_POINT_COMPUTE: return "compute"; case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: return "ray-tracing"; case VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI: return "subpass-shading"; default: return "unknown"; } } bool CoreChecks::ValidateGraphicsPipelineBindPoint(const CMD_BUFFER_STATE *cb_state, const PIPELINE_STATE *pipeline_state) const { bool skip = false; if (cb_state->inheritedViewportDepths.size() != 0) { bool dyn_viewport = IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) || IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT); bool dyn_scissor = IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) || IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SCISSOR); if (!dyn_viewport || !dyn_scissor) { skip |= LogError(device, "VUID-vkCmdBindPipeline-commandBuffer-04808", "Graphics pipeline incompatible with viewport/scissor inheritance."); } const auto &create_info = pipeline_state->create_info.graphics; const auto *discard_rectangle_state = LvlFindInChain<VkPipelineDiscardRectangleStateCreateInfoEXT>(create_info.pNext); if (discard_rectangle_state && discard_rectangle_state->discardRectangleCount != 0) { if (!IsDynamic(pipeline_state, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT)) { skip |= LogError(device, "VUID-vkCmdBindPipeline-commandBuffer-04809", "vkCmdBindPipeline(): commandBuffer is a secondary command buffer with " "VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D enabled, pipelineBindPoint is " "VK_PIPELINE_BIND_POINT_GRAPHICS and pipeline was created with " "VkPipelineDiscardRectangleStateCreateInfoEXT::discardRectangleCount = %" PRIu32 ", but without VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT.", discard_rectangle_state->discardRectangleCount); } } } return skip; } bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, 
VkPipeline pipeline) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_BINDPIPELINE); static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")}; skip |= ValidatePipelineBindPoint(cb_state.get(), pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors); const auto pipeline_state = Get<PIPELINE_STATE>(pipeline); assert(pipeline_state); const auto pipeline_state_bind_point = pipeline_state->GetPipelineType(); if (pipelineBindPoint != pipeline_state_bind_point) { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-00779", "Cannot bind a pipeline of type %s to the graphics pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-00780", "Cannot bind a pipeline of type %s to the compute pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-02392", "Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } } else { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= ValidateGraphicsPipelineBindPoint(cb_state.get(), pipeline_state.get()); if (cb_state->activeRenderPass && phys_dev_ext_props.provoking_vertex_props.provokingVertexModePerPipeline == VK_FALSE) { const auto lvl_bind_point = ConvertToLvlBindPoint(pipelineBindPoint); const auto &last_bound_it = cb_state->lastBound[lvl_bind_point]; if (last_bound_it.pipeline_state) { auto last_bound_provoking_vertex_state_ci = LvlFindInChain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>( last_bound_it.pipeline_state->create_info.graphics.pRasterizationState->pNext); auto current_provoking_vertex_state_ci = LvlFindInChain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>( pipeline_state->create_info.graphics.pRasterizationState->pNext); if (last_bound_provoking_vertex_state_ci && !current_provoking_vertex_state_ci) { skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881", "Previous %s's provokingVertexMode is %s, but %s doesn't chain " "VkPipelineRasterizationProvokingVertexStateCreateInfoEXT.", report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str(), string_VkProvokingVertexModeEXT(last_bound_provoking_vertex_state_ci->provokingVertexMode), report_data->FormatHandle(pipeline).c_str()); } else if (!last_bound_provoking_vertex_state_ci && current_provoking_vertex_state_ci) { skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881", " %s's provokingVertexMode is %s, but previous %s doesn't chain " "VkPipelineRasterizationProvokingVertexStateCreateInfoEXT.", report_data->FormatHandle(pipeline).c_str(), string_VkProvokingVertexModeEXT(current_provoking_vertex_state_ci->provokingVertexMode), 
report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str()); } else if (last_bound_provoking_vertex_state_ci && current_provoking_vertex_state_ci && last_bound_provoking_vertex_state_ci->provokingVertexMode != current_provoking_vertex_state_ci->provokingVertexMode) { skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881", "%s's provokingVertexMode is %s, but previous %s's provokingVertexMode is %s.", report_data->FormatHandle(pipeline).c_str(), string_VkProvokingVertexModeEXT(current_provoking_vertex_state_ci->provokingVertexMode), report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str(), string_VkProvokingVertexModeEXT(last_bound_provoking_vertex_state_ci->provokingVertexMode)); } } } if (cb_state->activeRenderPass && phys_dev_ext_props.sample_locations_props.variableSampleLocations == VK_FALSE) { const auto *sample_locations = LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(pipeline_state->create_info.graphics.pNext); if (sample_locations && sample_locations->sampleLocationsEnable == VK_TRUE && !IsDynamic(pipeline_state.get(), VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT)) { const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info = LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(cb_state->activeRenderPassBeginInfo.pNext); bool found = false; if (sample_locations_begin_info) { for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) { if (sample_locations_begin_info->pPostSubpassSampleLocations[i].subpassIndex == cb_state->activeSubpass) { if (MatchSampleLocationsInfo( &sample_locations_begin_info->pPostSubpassSampleLocations[i].sampleLocationsInfo, &sample_locations->sampleLocationsInfo)) { found = true; } } } } if (!found) { skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-variableSampleLocations-01525", "vkCmdBindPipeline(): VkPhysicalDeviceSampleLocationsPropertiesEXT::variableSampleLocations " "is false, pipeline is a graphics pipeline with " "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsEnable equal to true and without " "VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but the current render pass (%" PRIu32 ") was not begun with any element of " "VkRenderPassSampleLocationsBeginInfoEXT::pPostSubpassSampleLocations subpassIndex " "matching the current subpass index and sampleLocationsInfo matching sampleLocationsInfo of " "VkPipelineSampleLocationsStateCreateInfoEXT the pipeline was created with.", cb_state->activeSubpass); } } } } if (pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) { skip |= LogError( pipeline, "VUID-vkCmdBindPipeline-pipeline-03382", "vkCmdBindPipeline(): Cannot bind a pipeline that was created with the VK_PIPELINE_CREATE_LIBRARY_BIT_KHR flag."); } if (cb_state->transform_feedback_active) { skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-None-02323", "vkCmdBindPipeline(): transform feedback is active."); } } return skip; } bool CoreChecks::ForbidInheritedViewportScissor(VkCommandBuffer commandBuffer, const CMD_BUFFER_STATE *cb_state, const char* vuid, const char *cmdName) const { bool skip = false; if (cb_state->inheritedViewportDepths.size() != 0) { skip |= LogError( commandBuffer, vuid, "%s: commandBuffer must not have VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D enabled.", cmdName); } return skip; } bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) const { const auto 
cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETVIEWPORT); skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state.get(), "VUID-vkCmdSetViewport-commandBuffer-04821", "vkCmdSetViewport"); return skip; } bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETSCISSOR); skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state.get(), "VUID-vkCmdSetScissor-viewportScissor2D-04789", "vkCmdSetScissor"); return skip; } bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETEXCLUSIVESCISSORNV); if (!enabled_features.exclusive_scissor_features.exclusiveScissor) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-None-02031", "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled."); } return skip; } bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_BINDSHADINGRATEIMAGENV); if (!enabled_features.shading_rate_image_features.shadingRateImage) { skip |= LogError(commandBuffer, "VUID-vkCmdBindShadingRateImageNV-None-02058", "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled."); } if (imageView == VK_NULL_HANDLE) { return skip; } const auto view_state = Get<IMAGE_VIEW_STATE>(imageView); if (!view_state) { skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid " "VkImageView handle."); return skip; } const auto &ivci = view_state->create_info; if (ivci.viewType != VK_IMAGE_VIEW_TYPE_2D && ivci.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY) { skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid " "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY."); } if (ivci.format != VK_FORMAT_R8_UINT) { skip |= LogError( imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02060", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT."); } const auto image_state = view_state->image_state.get(); auto usage = image_state->createInfo.usage; if (!(usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) { skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02061", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been " "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set."); } bool hit_error = false; // XXX TODO: While the VUID says "each subresource", only the base mip level is // actually used. Since we don't have an existing convenience function to iterate // over all mip levels, just don't bother with non-base levels. 
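// Build a VkImageSubresourceLayers from the view's normalized range; per the note above,
// only the base mip level (across the view's layer range) is checked against the expected layout.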
const VkImageSubresourceRange &range = view_state->normalized_subresource_range; VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount}; if (image_state) { skip |= VerifyImageLayout(cb_state.get(), image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdBindShadingRateImageNV()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063", "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error); } return skip; } bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV *pShadingRatePalettes) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETVIEWPORTSHADINGRATEPALETTENV); if (!enabled_features.shading_rate_image_features.shadingRateImage) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064", "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled."); } for (uint32_t i = 0; i < viewportCount; ++i) { auto *palette = &pShadingRatePalettes[i]; if (palette->shadingRatePaletteEntryCount == 0 || palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) { skip |= LogError( commandBuffer, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071", "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize."); } } return skip; } bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, const char *func_name) const { bool skip = false; const auto vb_state = Get<BUFFER_STATE>(triangles.vertexData); if (vb_state != nullptr && vb_state->createInfo.size <= triangles.vertexOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name); } const auto ib_state = Get<BUFFER_STATE>(triangles.indexData); if (ib_state != nullptr && ib_state->createInfo.size <= triangles.indexOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name); } const auto td_state = Get<BUFFER_STATE>(triangles.transformData); if (td_state != nullptr && td_state->createInfo.size <= triangles.transformOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name); } return skip; } bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, const char *func_name) const { bool skip = false; const auto aabb_state = Get<BUFFER_STATE>(aabbs.aabbData); if (aabb_state != nullptr && aabb_state->createInfo.size > 0 && aabb_state->createInfo.size <= aabbs.offset) { skip |= LogError(device, "VUID-VkGeometryAABBNV-offset-02439", "%s", func_name); } return skip; } bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, const char *func_name) const { bool skip = false; if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) { skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, func_name); } else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) { skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, func_name); } return skip; } bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device, const VkAccelerationStructureCreateInfoNV *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureNV *pAccelerationStructure) const { bool skip = false; if (pCreateInfo != nullptr && 
pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) { for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) { skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], "vkCreateAccelerationStructureNV():"); } } return skip; } bool CoreChecks::PreCallValidateCreateAccelerationStructureKHR(VkDevice device, const VkAccelerationStructureCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureKHR *pAccelerationStructure) const { bool skip = false; if (pCreateInfo) { const auto buffer_state = Get<BUFFER_STATE>(pCreateInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03614", "VkAccelerationStructureCreateInfoKHR(): buffer must have been created with a usage value containing " "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR."); } if (buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03615", "VkAccelerationStructureCreateInfoKHR(): buffer must not have been created with " "VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT."); } if (pCreateInfo->offset + pCreateInfo->size > buffer_state->createInfo.size) { skip |= LogError( device, "VUID-VkAccelerationStructureCreateInfoKHR-offset-03616", "VkAccelerationStructureCreateInfoKHR(): The sum of offset and size must be less than or equal to the size of buffer."); } } } return skip; } bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device, const VkBindAccelerationStructureMemoryInfoNV &info) const { bool skip = false; const auto as_state = Get<ACCELERATION_STRUCTURE_STATE>(info.accelerationStructure); if (!as_state) { return skip; } if (!as_state->GetBoundMemory().empty()) { skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-03620", "vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object."); } // Validate bound memory range information const auto mem_info = Get<DEVICE_MEMORY_STATE>(info.memory); if (mem_info) { skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info.get(), info.memoryOffset, "vkBindAccelerationStructureMemoryNV()"); skip |= ValidateMemoryTypes(mem_info.get(), as_state->memory_requirements.memoryRequirements.memoryTypeBits, "vkBindAccelerationStructureMemoryNV()", "VUID-VkBindAccelerationStructureMemoryInfoNV-memory-03622"); } // Validate memory requirements alignment if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) { skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03623", "vkBindAccelerationStructureMemoryNV(): memoryOffset 0x%" PRIxLEAST64 " must be an integer multiple of the alignment 0x%" PRIxLEAST64 " member of the VkMemoryRequirements structure returned from " "a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV", info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment); } if (mem_info) { // Validate memory requirements size if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) { skip |= LogError(info.accelerationStructure, 
"VUID-VkBindAccelerationStructureMemoryInfoNV-size-03624", "vkBindAccelerationStructureMemoryNV(): The size 0x%" PRIxLEAST64 " member of the VkMemoryRequirements structure returned from a call to " "vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV must be less than or equal to the size " "of memory minus memoryOffset 0x%" PRIxLEAST64 ".", as_state->memory_requirements.memoryRequirements.size, mem_info->alloc_info.allocationSize - info.memoryOffset); } } return skip; } bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const { bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { skip |= ValidateBindAccelerationStructureMemory(device, pBindInfos[i]); } return skip; } bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void *pData) const { bool skip = false; const auto as_state = Get<ACCELERATION_STRUCTURE_STATE>(accelerationStructure); if (as_state != nullptr) { // TODO: update the fake VUID below once the real one is generated. skip = ValidateMemoryIsBoundToAccelerationStructure( as_state.get(), "vkGetAccelerationStructureHandleNV", "UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX"); } return skip; } bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); skip |= ValidateCmd(cb_state.get(), CMD_BUILDACCELERATIONSTRUCTURESKHR); if (pInfos != NULL) { for (uint32_t info_index = 0; info_index < infoCount; ++info_index) { const auto src_as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfos[info_index].srcAccelerationStructure); const auto dst_as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfos[info_index].dstAccelerationStructure); if (pInfos[info_index].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) { if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03667", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must " "have been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in " "VkAccelerationStructureBuildGeometryInfoKHR::flags."); } if (pInfos[info_index].geometryCount != src_as_state->build_info_khr.geometryCount) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03758", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR," " its geometryCount member must have the same value which was specified when " "srcAccelerationStructure was last built."); } if (pInfos[info_index].flags != src_as_state->build_info_khr.flags) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03759", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " 
VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which" " was specified when srcAccelerationStructure was last built."); } if (src_as_state && pInfos[info_index].type != src_as_state->build_info_khr.type) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03760", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which" " was specified when srcAccelerationStructure was last built."); } } if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03700", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have " "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03699", "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } skip |= ValidateAccelerationBuffers(info_index, pInfos[info_index], "vkCmdBuildAccelerationStructuresKHR"); } } return skip; } bool CoreChecks::ValidateAccelerationBuffers(uint32_t info_index, const VkAccelerationStructureBuildGeometryInfoKHR &info, const char *func_name) const { bool skip = false; const auto geometry_count = info.geometryCount; const auto *p_geometries = info.pGeometries; const auto *const *const pp_geometries = info.ppGeometries; auto buffer_check = [this, info_index, func_name](uint32_t gi, const VkDeviceOrHostAddressConstKHR address, const char *field) -> bool { const auto itr = buffer_address_map_.find(address.deviceAddress); if (itr != buffer_address_map_.end() && !(itr->second->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR)) { LogObjectList objlist(device); objlist.add(itr->second->Handle()); return LogError(objlist, "VUID-vkCmdBuildAccelerationStructuresKHR-geometry-03673", "%s(): The buffer associated with pInfos[%" PRIu32 "].pGeometries[%" PRIu32 "].%s was not created with VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR.", func_name, info_index, gi, field); } return false; }; // Parameter validation has already checked VUID-VkAccelerationStructureBuildGeometryInfoKHR-pGeometries-03788 // !(pGeometries && ppGeometries) std::function<const VkAccelerationStructureGeometryKHR &(uint32_t)> geom_accessor; if (p_geometries) { geom_accessor = [p_geometries](uint32_t i) -> 
const VkAccelerationStructureGeometryKHR & { return p_geometries[i]; }; } else if (pp_geometries) { geom_accessor = [pp_geometries](uint32_t i) -> const VkAccelerationStructureGeometryKHR & { // pp_geometries[i] is assumed to be a valid pointer return *pp_geometries[i]; }; } if (geom_accessor) { for (uint32_t geom_index = 0; geom_index < geometry_count; ++geom_index) { const auto &geom_data = geom_accessor(geom_index); switch (geom_data.geometryType) { case VK_GEOMETRY_TYPE_TRIANGLES_KHR: // == VK_GEOMETRY_TYPE_TRIANGLES_NV skip |= buffer_check(geom_index, geom_data.geometry.triangles.vertexData, "geometry.triangles.vertexData"); skip |= buffer_check(geom_index, geom_data.geometry.triangles.indexData, "geometry.triangles.indexData"); skip |= buffer_check(geom_index, geom_data.geometry.triangles.transformData, "geometry.triangles.transformData"); break; case VK_GEOMETRY_TYPE_INSTANCES_KHR: skip |= buffer_check(geom_index, geom_data.geometry.instances.data, "geometry.instances.data"); break; case VK_GEOMETRY_TYPE_AABBS_KHR: // == VK_GEOMETRY_TYPE_AABBS_NV skip |= buffer_check(geom_index, geom_data.geometry.aabbs.data, "geometry.aabbs.data"); break; default: // no-op break; } } } const auto itr = buffer_address_map_.find(info.scratchData.deviceAddress); if (itr != buffer_address_map_.end() && !(itr->second->createInfo.usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03674", "vkBuildAccelerationStructuresKHR(): The buffer associated with pInfos[%" PRIu32 "].scratchData.deviceAddress was not created with VK_BUFFER_USAGE_STORAGE_BUFFER_BIT bit.", info_index); } return skip; } bool CoreChecks::PreCallValidateBuildAccelerationStructuresKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const { bool skip = false; for (uint32_t i = 0; i < infoCount; ++i) { const auto src_as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfos[i].srcAccelerationStructure); const auto dst_as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfos[i].dstAccelerationStructure); if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) { if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03667", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have " "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in " "VkAccelerationStructureBuildGeometryInfoKHR::flags."); } if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03758", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR," " its geometryCount member must have the same value which was specified when " "srcAccelerationStructure was last built."); } if (pInfos[i].flags != src_as_state->build_info_khr.flags) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03759", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, 
its flags member must have the same value which" " was specified when srcAccelerationStructure was last built."); } if (pInfos[i].type != src_as_state->build_info_khr.type) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03760", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which" " was specified when srcAccelerationStructure was last built."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03700", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have " "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) { if (!dst_as_state || (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03699", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } } return skip; } bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_BUILDACCELERATIONSTRUCTURENV); if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) { for (uint32_t i = 0; i < pInfo->geometryCount; i++) { skip |= ValidateGeometryNV(pInfo->pGeometries[i], "vkCmdBuildAccelerationStructureNV():"); } } if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241", "vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.", pInfo->geometryCount); } const auto dst_as_state = Get<ACCELERATION_STRUCTURE_STATE>(dst); const auto src_as_state = Get<ACCELERATION_STRUCTURE_STATE>(src); const auto scratch_buffer_state = Get<BUFFER_STATE>(scratch); if (dst_as_state != nullptr && pInfo != nullptr) { if (dst_as_state->create_infoNV.info.type != pInfo->type) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info 
VkAccelerationStructureInfoNV::type" "[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].", string_VkAccelerationStructureTypeNV(dst_as_state->create_infoNV.info.type), string_VkAccelerationStructureTypeNV(pInfo->type)); } if (dst_as_state->create_infoNV.info.flags != pInfo->flags) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags" "[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].", dst_as_state->create_infoNV.info.flags, pInfo->flags); } if (dst_as_state->create_infoNV.info.instanceCount < pInfo->instanceCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount " "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].", dst_as_state->create_infoNV.info.instanceCount, pInfo->instanceCount); } if (dst_as_state->create_infoNV.info.geometryCount < pInfo->geometryCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount" "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].", dst_as_state->create_infoNV.info.geometryCount, pInfo->geometryCount); } else { for (uint32_t i = 0; i < pInfo->geometryCount; i++) { const VkGeometryDataNV &create_geometry_data = dst_as_state->create_infoNV.info.pGeometries[i].geometry; const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry; if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) { skip |= LogError( commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]" "must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].", i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount); break; } if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) { skip |= LogError( commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]" "must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].", i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount); break; } if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]" "must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].", i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs); break; } } } } if (dst_as_state != nullptr) { skip |= ValidateMemoryIsBoundToAccelerationStructure( dst_as_state.get(), "vkCmdBuildAccelerationStructureNV()", "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV"); } if (update == VK_TRUE) { if (src == VK_NULL_HANDLE) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489", 
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE."); } else { if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02490", "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before " "with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in " "VkAccelerationStructureInfoNV::flags."); } } if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) { skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery, "vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() " "has not been called for update scratch memory.", report_data->FormatHandle(dst_as_state->acceleration_structure()).c_str()); // Use requirements fetched at create time } if (scratch_buffer_state != nullptr && dst_as_state != nullptr && dst_as_state->update_scratch_memory_requirements.memoryRequirements.size > (scratch_buffer_state->createInfo.size - scratchOffset)) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02492", "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the " "VkMemoryRequirements structure returned from a call to " "vkGetAccelerationStructureMemoryRequirementsNV with " "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and " "VkAccelerationStructureMemoryRequirementsInfoNV::type set to " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than " "or equal to the size of scratch minus scratchOffset"); } } else { if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) { skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery, "vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but " "vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.", report_data->FormatHandle(dst_as_state->acceleration_structure()).c_str()); // Use requirements fetched at create time } if (scratch_buffer_state != nullptr && dst_as_state != nullptr && dst_as_state->build_scratch_memory_requirements.memoryRequirements.size > (scratch_buffer_state->createInfo.size - scratchOffset)) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02491", "vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the " "VkMemoryRequirements structure returned from a call to " "vkGetAccelerationStructureMemoryRequirementsNV with " "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and " "VkAccelerationStructureMemoryRequirementsInfoNV::type set to " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than " "or equal to the size of scratch minus scratchOffset"); } } if (instanceData != VK_NULL_HANDLE) { const auto buffer_state = Get<BUFFER_STATE>(instanceData); if (buffer_state) { skip |= ValidateBufferUsageFlags(buffer_state.get(), VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true, "VUID-VkAccelerationStructureInfoNV-instanceData-02782", "vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV"); } } if (scratch_buffer_state) { skip |= ValidateBufferUsageFlags(scratch_buffer_state.get(), VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true, "VUID-VkAccelerationStructureInfoNV-scratch-02781", 
"vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV"); } return skip; } bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_COPYACCELERATIONSTRUCTURENV); const auto dst_as_state = Get<ACCELERATION_STRUCTURE_STATE>(dst); const auto src_as_state = Get<ACCELERATION_STRUCTURE_STATE>(src); if (dst_as_state != nullptr) { skip |= ValidateMemoryIsBoundToAccelerationStructure( dst_as_state.get(), "vkCmdBuildAccelerationStructureNV()", "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV"); } if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) { if (src_as_state != nullptr && (!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-src-03411", "vkCmdCopyAccelerationStructureNV(): src must have been built with " "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is " "VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV."); } } if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410", "vkCmdCopyAccelerationStructureNV():mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR" "or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR."); } return skip; } bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks *pAllocator) const { const auto as_state = Get<ACCELERATION_STRUCTURE_STATE>(accelerationStructure); bool skip = false; if (as_state) { skip |= ValidateObjectNotInUse(as_state.get(), "vkDestroyAccelerationStructureNV", "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442"); } return skip; } bool CoreChecks::PreCallValidateDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks *pAllocator) const { const auto as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(accelerationStructure); bool skip = false; if (as_state) { skip |= ValidateObjectNotInUse(as_state.get(), "vkDestroyAccelerationStructureKHR", "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442"); } if (pAllocator && !as_state->allocator) { skip |= LogError(device, "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02444", "vkDestroyAccelerationStructureKH:If no VkAllocationCallbacks were provided when accelerationStructure" "was created, pAllocator must be NULL."); } return skip; } bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV *pViewportWScalings) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETVIEWPORTWSCALINGNV); return skip; } bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETLINEWIDTH); return skip; 
}

bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor,
                                                     uint16_t lineStipplePattern) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_SETLINESTIPPLEEXT);
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor,
                                                float depthBiasClamp, float depthBiasSlopeFactor) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_SETDEPTHBIAS);
    if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
                         "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
                         "be set to 0.0.");
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_SETBLENDCONSTANTS);
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds,
                                                  float maxDepthBounds) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_SETDEPTHBOUNDS);

    // The extension was not created with a feature bit, which prevents displaying the 2 variations of the VUIDs
    if (!IsExtEnabled(device_extensions.vk_ext_depth_range_unrestricted)) {
        if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
            // Also VUID-vkCmdSetDepthBounds-minDepthBounds-00600
            skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-minDepthBounds-02508",
                             "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and minDepthBounds "
                             "(=%f) is not within the [0.0, 1.0] range.",
                             minDepthBounds);
        }
        if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
            // Also VUID-vkCmdSetDepthBounds-maxDepthBounds-00601
            skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-maxDepthBounds-02509",
                             "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and maxDepthBounds "
                             "(=%f) is not within the [0.0, 1.0] range.",
                             maxDepthBounds);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                         uint32_t compareMask) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_SETSTENCILCOMPAREMASK);
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t writeMask) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_SETSTENCILWRITEMASK);
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t reference) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_SETSTENCILREFERENCE);
    return skip;
}

bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                      VkPipelineLayout layout, uint32_t
firstSet, uint32_t setCount,
                                                      const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                      const uint32_t *pDynamicOffsets) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_BINDDESCRIPTORSETS);
    // Track total count of dynamic descriptor types to make sure we have an offset for each one
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";

    const auto pipeline_layout = Get<PIPELINE_LAYOUT_STATE>(layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        const auto descriptor_set = Get<cvdescriptorset::DescriptorSet>(pDescriptorSets[set_idx]);
        if (descriptor_set) {
            // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
            if (!VerifySetLayoutCompatibility(report_data, descriptor_set.get(), pipeline_layout.get(), set_idx + firstSet,
                                              error_string)) {
                skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
                                 "vkCmdBindDescriptorSets(): descriptorSet #%u being bound is not compatible with overlapping "
                                 "descriptorSetLayout at index %u of "
                                 "%s due to: %s.",
                                 set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
            }

            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            if (set_dynamic_descriptor_count) {
                // First make sure we won't overstep bounds of pDynamicOffsets array
                if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
                    // Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
                    skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
                                     "vkCmdBindDescriptorSets(): descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u "
                                     "dynamicOffsets are left in "
                                     "pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                     set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
                                     descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from
                    // testing against the "short tail" we're skipping below.
total_dynamic_descriptors = dynamicOffsetCount; } else { // Validate dynamic offsets and Dynamic Offset Minimums // offset for all sets (pDynamicOffsets) uint32_t cur_dyn_offset = total_dynamic_descriptors; // offset into this descriptor set uint32_t set_dyn_offset = 0; const auto &dsl = descriptor_set->GetLayout(); const auto binding_count = dsl->GetBindingCount(); const auto &limits = phys_dev_props.limits; for (uint32_t i = 0; i < binding_count; i++) { const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(i); // skip checking binding if not needed if (cvdescriptorset::IsDynamicDescriptor(binding->descriptorType) == false) { continue; } // If a descriptor set has only binding 0 and 2 the binding_index will be 0 and 2 const uint32_t binding_index = binding->binding; const uint32_t descriptorCount = binding->descriptorCount; // Need to loop through each descriptor count inside the binding // if descriptorCount is zero the binding with a dynamic descriptor type does not count for (uint32_t j = 0; j < descriptorCount; j++) { const uint32_t offset = pDynamicOffsets[cur_dyn_offset]; if (offset == 0) { // offset of zero is equivalent of not having the dynamic offset cur_dyn_offset++; set_dyn_offset++; continue; } // Validate alignment with limit if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) && (SafeModulo(offset, limits.minUniformBufferOffsetAlignment) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of " "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".", cur_dyn_offset, offset, limits.minUniformBufferOffsetAlignment); } if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) && (SafeModulo(offset, limits.minStorageBufferOffsetAlignment) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of " "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".", cur_dyn_offset, offset, limits.minStorageBufferOffsetAlignment); } auto *descriptor = descriptor_set->GetDescriptorFromDynamicOffsetIndex(set_dyn_offset); assert(descriptor != nullptr); // Currently only GeneralBuffer are dynamic and need to be checked if (descriptor->GetClass() == cvdescriptorset::DescriptorClass::GeneralBuffer) { const auto *buffer_descriptor = static_cast<const cvdescriptorset::BufferDescriptor *>(descriptor); const VkDeviceSize bound_range = buffer_descriptor->GetRange(); const VkDeviceSize bound_offset = buffer_descriptor->GetOffset(); //NOTE: null / invalid buffers may show up here, errors are raised elsewhere for this. 
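                            // The two bounds checks below reject (1) any non-zero dynamic offset when the descriptor was
                            // written with range == VK_WHOLE_SIZE, and (2) offsets where offset + bound_offset + bound_range
                            // would land past the end of the bound buffer.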
const auto buffer_state = buffer_descriptor->GetBufferState(); // Validate offset didn't go over buffer if ((bound_range == VK_WHOLE_SIZE) && (offset > 0)) { LogObjectList objlist(commandBuffer); objlist.add(pDescriptorSets[set_idx]); objlist.add(buffer_descriptor->GetBuffer()); skip |= LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x, but must be zero since " "the buffer descriptor's range is VK_WHOLE_SIZE in descriptorSet #%u binding #%u " "descriptor[%u].", cur_dyn_offset, offset, set_idx, binding_index, j); } else if (buffer_state && (bound_range != VK_WHOLE_SIZE) && ((offset + bound_range + bound_offset) > buffer_state->createInfo.size)) { LogObjectList objlist(commandBuffer); objlist.add(pDescriptorSets[set_idx]); objlist.add(buffer_descriptor->GetBuffer()); skip |= LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x which when added to the " "buffer descriptor's range (0x%" PRIxLEAST64 ") is greater than the size of the buffer (0x%" PRIxLEAST64 ") in descriptorSet #%u binding #%u descriptor[%u].", cur_dyn_offset, offset, bound_range, buffer_state->createInfo.size, set_idx, binding_index, j); } } cur_dyn_offset++; set_dyn_offset++; } // descriptorCount loop } // bindingCount loop // Keep running total of dynamic descriptor count to verify at the end total_dynamic_descriptors += set_dynamic_descriptor_count; } } if (descriptor_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE) { skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-04616", "vkCmdBindDescriptorSets(): pDescriptorSets[%" PRIu32 "] was allocated from a pool that was created with VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE.", set_idx); } } else { skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter", "vkCmdBindDescriptorSets(): Attempt to bind %s that doesn't exist!", report_data->FormatHandle(pDescriptorSets[set_idx]).c_str()); } } // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound if (total_dynamic_descriptors != dynamicOffsetCount) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "vkCmdBindDescriptorSets(): Attempting to bind %u descriptorSets with %u dynamic descriptors, but " "dynamicOffsetCount is %u. 
It should " "exactly match the number of dynamic descriptors.", setCount, total_dynamic_descriptors, dynamicOffsetCount); } // firstSet and descriptorSetCount sum must be less than setLayoutCount if ((firstSet + setCount) > static_cast<uint32_t>(pipeline_layout->set_layouts.size())) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindDescriptorSets-firstSet-00360", "vkCmdBindDescriptorSets(): Sum of firstSet (%u) and descriptorSetCount (%u) is greater than " "VkPipelineLayoutCreateInfo::setLayoutCount " "(%zu) when pipeline layout was created", firstSet, setCount, pipeline_layout->set_layouts.size()); } static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361")}; skip |= ValidatePipelineBindPoint(cb_state.get(), pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors); return skip; } // Validates that the supplied bind point is supported for the command buffer (vis. the command pool) // Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint // TODO add vkCmdBindPipeline bind_point validation using this call. bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) const { bool skip = false; auto pool = cb_state->command_pool; if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)), }; const auto &qfp = physical_device_state->queue_family_properties[pool->queueFamilyIndex]; if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) { const std::string &error = bind_errors.at(bind_point); LogObjectList objlist(cb_state->commandBuffer()); objlist.add(cb_state->createInfo.commandPool); skip |= LogError(objlist, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name, report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(), string_VkPipelineBindPoint(bind_point)); } } return skip; } bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); const char *func_name = "vkCmdPushDescriptorSetKHR()"; bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_PUSHDESCRIPTORSETKHR); static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), 
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")}; skip |= ValidatePipelineBindPoint(cb_state.get(), pipelineBindPoint, func_name, bind_errors); const auto layout_data = Get<PIPELINE_LAYOUT_STATE>(layout); // Validate the set index points to a push descriptor set and is in range if (layout_data) { const auto &set_layouts = layout_data->set_layouts; if (set < set_layouts.size()) { const auto &dsl = set_layouts[set]; if (dsl) { if (!dsl->IsPushDescriptor()) { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set, report_data->FormatHandle(layout).c_str()); } else { // Create an empty proxy in order to use the existing descriptor set update validation // TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we // don't have to do this. cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this); skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name); } } } else { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size())); } } return skip; } bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const { const auto buffer_state = Get<BUFFER_STATE>(buffer); const auto cb_node = Get<CMD_BUFFER_STATE>(commandBuffer); assert(buffer_state); assert(cb_node); bool skip = ValidateBufferUsageFlags(buffer_state.get(), VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"); skip |= ValidateCmd(cb_node.get(), CMD_BINDINDEXBUFFER); skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434"); const auto offset_align = GetIndexAlignment(indexType); if (offset % offset_align) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00432", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset, string_VkIndexType(indexType)); } if (offset >= buffer_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00431", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", offset, buffer_state->requirements.size, report_data->FormatHandle(buffer_state->buffer()).c_str()); } return skip; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_BINDVERTEXBUFFERS); for (uint32_t i = 0; i < bindingCount; ++i) { const auto buffer_state = Get<BUFFER_STATE>(pBuffers[i]); if (buffer_state) { skip |= ValidateBufferUsageFlags(buffer_state.get(), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), "vkCmdBindVertexBuffers()", 
"VUID-vkCmdBindVertexBuffers-pBuffers-00628"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers-pOffsets-00626", "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } } } return skip; } // Validate that an image's sampleCount matches the requirement for a specific API call bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location, const std::string &msgCode) const { bool skip = false; if (image_state->createInfo.samples != sample_count) { skip = LogError(image_state->image(), msgCode, "%s for %s was created with a sample count of %s but must be %s.", location, report_data->FormatHandle(image_state->image()).c_str(), string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count)); } return skip; } bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); const auto dst_buffer_state = Get<BUFFER_STATE>(dstBuffer); assert(dst_buffer_state); bool skip = false; skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state.get(), "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035"); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buffer_state.get(), VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmd(cb_state.get(), CMD_UPDATEBUFFER); skip |= ValidateProtectedBuffer(cb_state.get(), dst_buffer_state.get(), "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01813"); skip |= ValidateUnprotectedBuffer(cb_state.get(), dst_buffer_state.get(), "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01814"); if (dstOffset >= dst_buffer_state->createInfo.size) { skip |= LogError( commandBuffer, "VUID-vkCmdUpdateBuffer-dstOffset-00032", "vkCmdUpdateBuffer() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset, dst_buffer_state->createInfo.size, report_data->FormatHandle(dst_buffer_state->buffer()).c_str()); } else if (dataSize > dst_buffer_state->createInfo.size - dstOffset) { skip |= LogError(commandBuffer, "VUID-vkCmdUpdateBuffer-dataSize-00033", "vkCmdUpdateBuffer() dataSize (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s) minus dstOffset (0x%" PRIxLEAST64 ").", dataSize, dst_buffer_state->createInfo.size, report_data->FormatHandle(dst_buffer_state->buffer()).c_str(), dstOffset); } return skip; } bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETEVENT); Location loc(Func::vkCmdSetEvent, Field::stageMask); LogObjectList objects(commandBuffer); skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } bool CoreChecks::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR *pDependencyInfo) const { LogObjectList objects(commandBuffer); objects.add(event); const auto cb_state 
= Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; if (!enabled_features.synchronization2_features.synchronization2) { skip |= LogError(commandBuffer, "VUID-vkCmdSetEvent2KHR-synchronization2-03824", "vkCmdSetEvent2KHR(): Synchronization2 feature is not enabled"); } skip |= ValidateCmd(cb_state.get(), CMD_SETEVENT); Location loc(Func::vkCmdSetEvent2KHR, Field::pDependencyInfo); if (pDependencyInfo->dependencyFlags != 0) { skip |= LogError(objects, "VUID-vkCmdSetEvent2KHR-dependencyFlags-03825", "%s (%s) must be 0", loc.dot(Field::dependencyFlags).Message().c_str(), string_VkDependencyFlags(pDependencyInfo->dependencyFlags).c_str()); } skip |= ValidateDependencyInfo(objects, loc, cb_state.get(), pDependencyInfo); return skip; } bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdResetEvent, Field::stageMask); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_RESETEVENT); skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } bool CoreChecks::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR stageMask) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdResetEvent2KHR, Field::stageMask); bool skip = false; if (!enabled_features.synchronization2_features.synchronization2) { skip |= LogError(commandBuffer, "VUID-vkCmdResetEvent2KHR-synchronization2-03829", "vkCmdResetEvent2KHR(): Synchronization2 feature is not enabled"); } skip |= ValidateCmd(cb_state.get(), CMD_RESETEVENT); skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags2KHR inflags) { return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0; } // transient helper struct for checking parts of VUID 02285 struct RenderPassDepState { using Location = core_error::Location; using Func = core_error::Func; using Struct = core_error::Struct; using Field = core_error::Field; const CoreChecks *core; const std::string func_name; const std::string vuid; uint32_t active_subpass; const VkRenderPass rp_handle; const VkPipelineStageFlags2KHR disabled_features; const std::vector<uint32_t> &self_dependencies; const safe_VkSubpassDependency2 *dependencies; RenderPassDepState(const CoreChecks *c, const std::string &f, const std::string &v, uint32_t subpass, const VkRenderPass handle, const DeviceFeatures &features, const std::vector<uint32_t> &self_deps, const safe_VkSubpassDependency2 *deps) : core(c), func_name(f), vuid(v), active_subpass(subpass), rp_handle(handle), disabled_features(sync_utils::DisabledPipelineStages(features)), self_dependencies(self_deps), dependencies(deps) {} VkMemoryBarrier2KHR GetSubPassDepBarrier(const safe_VkSubpassDependency2 &dep) { VkMemoryBarrier2KHR result; const auto *barrier = LvlFindInChain<VkMemoryBarrier2KHR>(dep.pNext); if (barrier) { result = *barrier; } else { result.srcStageMask = dep.srcStageMask; result.dstStageMask = 
dep.dstStageMask;
            result.srcAccessMask = dep.srcAccessMask;
            result.dstAccessMask = dep.dstAccessMask;
        }
        return result;
    }

    bool ValidateStage(const Location &loc, VkPipelineStageFlags2KHR src_stage_mask, VkPipelineStageFlags2KHR dst_stage_mask) {
        // Look for matching mask in any self-dependency
        bool match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]);
            auto sub_src_stage_mask =
                sync_utils::ExpandPipelineStages(sub_dep.srcStageMask, sync_utils::kAllQueueTypes, disabled_features);
            auto sub_dst_stage_mask =
                sync_utils::ExpandPipelineStages(sub_dep.dstStageMask, sync_utils::kAllQueueTypes, disabled_features);
            match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                     (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
                    ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                     (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
            if (match) break;
        }
        if (!match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            core->LogError(rp_handle, vuid,
                           "%s (0x%" PRIx64
                           ") is not a subset of VkSubpassDependency srcStageMask "
                           "for any self-dependency of subpass %d of %s for which dstStageMask is also a subset. "
                           "Candidate VkSubpassDependency are pDependencies entries [%s].",
                           loc.dot(Field::srcStageMask).Message().c_str(), src_stage_mask, active_subpass,
                           core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
            core->LogError(rp_handle, vuid,
                           "%s (0x%" PRIx64
                           ") is not a subset of VkSubpassDependency dstStageMask "
                           "for any self-dependency of subpass %d of %s for which srcStageMask is also a subset. "
                           "Candidate VkSubpassDependency are pDependencies entries [%s].",
                           loc.dot(Field::dstStageMask).Message().c_str(), dst_stage_mask, active_subpass,
                           core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
        }
        return !match;
    }

    bool ValidateAccess(const Location &loc, VkAccessFlags2KHR src_access_mask, VkAccessFlags2KHR dst_access_mask) {
        bool match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]);
            match = (src_access_mask == (sub_dep.srcAccessMask & src_access_mask)) &&
                    (dst_access_mask == (sub_dep.dstAccessMask & dst_access_mask));
            if (match) break;
        }
        if (!match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            core->LogError(rp_handle, vuid,
                           "%s (0x%" PRIx64
                           ") is not a subset of VkSubpassDependency "
                           "srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
                           loc.dot(Field::srcAccessMask).Message().c_str(), src_access_mask, active_subpass,
                           core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str());
            core->LogError(rp_handle, vuid,
                           "%s (0x%" PRIx64
                           ") is not a subset of VkSubpassDependency "
                           "dstAccessMask of subpass %d of %s. 
Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::dstAccessMask).Message().c_str(), dst_access_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } bool ValidateDependencyFlag(VkDependencyFlags dependency_flags) { bool match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; match = sub_dep.dependencyFlags == dependency_flags; if (match) break; } if (!match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); core->LogError(rp_handle, vuid, "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any " "self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].", func_name.c_str(), dependency_flags, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } }; // Validate VUs for Pipeline Barriers that are within a renderPass // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, VkDependencyFlags dependency_flags, uint32_t mem_barrier_count, const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count, const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) const { bool skip = false; const auto& rp_state = cb_state->activeRenderPass; RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier-pDependencies-02285", cb_state->activeSubpass, rp_state->renderPass(), enabled_features, rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies); if (state.self_dependencies.size() == 0) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s Barriers cannot be set during subpass %d of %s with no self-dependency specified.", outer_loc.Message().c_str(), state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str()); return skip; } // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass]; skip |= state.ValidateStage(outer_loc, src_stage_mask, dst_stage_mask); if (0 != buffer_mem_barrier_count) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(), buffer_mem_barrier_count, state.active_subpass, report_data->FormatHandle(rp_state->renderPass()).c_str()); } for (uint32_t i = 0; i < mem_barrier_count; ++i) { const auto &mem_barrier = mem_barriers[i]; Location loc(outer_loc.function, Struct::VkMemoryBarrier, Field::pMemoryBarriers, i); skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask); } for (uint32_t i = 0; i < image_mem_barrier_count; ++i) { const auto &img_barrier = image_barriers[i]; Location loc(outer_loc.function, Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i); skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask); if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= LogError(state.rp_handle, 
"VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182", "%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known if (VK_NULL_HANDLE != cb_state->activeFramebuffer) { skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc, state.rp_handle, img_barrier); } } skip |= state.ValidateDependencyFlag(dependency_flags); return skip; } bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state, const VkDependencyInfoKHR *dep_info) const { bool skip = false; const auto& rp_state = cb_state->activeRenderPass; RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier2KHR-pDependencies-02285", cb_state->activeSubpass, rp_state->renderPass(), enabled_features, rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies); if (state.self_dependencies.size() == 0) { skip |= LogError(state.rp_handle, state.vuid, "%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", state.func_name.c_str(), state.active_subpass, report_data->FormatHandle(rp_state->renderPass()).c_str()); return skip; } // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass]; for (uint32_t i = 0; i < dep_info->memoryBarrierCount; ++i) { const auto &mem_barrier = dep_info->pMemoryBarriers[i]; Location loc(outer_loc.function, Struct::VkMemoryBarrier2KHR, Field::pMemoryBarriers, i); skip |= state.ValidateStage(loc, mem_barrier.srcStageMask, mem_barrier.dstStageMask); skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask); } if (0 != dep_info->bufferMemoryBarrierCount) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(), dep_info->bufferMemoryBarrierCount, state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str()); } for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; ++i) { const auto &img_barrier = dep_info->pImageMemoryBarriers[i]; Location loc(outer_loc.function, Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i); skip |= state.ValidateStage(loc, img_barrier.srcStageMask, img_barrier.dstStageMask); skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask); if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-srcQueueFamilyIndex-01182", "%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known if (VK_NULL_HANDLE != cb_state->activeFramebuffer) { skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc, state.rp_handle, img_barrier); } } skip |= state.ValidateDependencyFlag(dep_info->dependencyFlags); 
return skip; } bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; // these are always allowed. stage_mask &= ~(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR | VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_HOST_BIT_KHR); if (stage_mask == 0) { return skip; } static const std::map<VkPipelineStageFlags2KHR, VkQueueFlags> metaFlags{ {VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT}, {VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, }; for (const auto &entry : metaFlags) { if (((entry.first & stage_mask) != 0) && ((entry.second & queue_flags) == 0)) { const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, entry.first); skip |= LogError(objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.", loc.Message().c_str(), sync_utils::StringPipelineStageFlags(entry.first).c_str(), string_VkQueueFlags(queue_flags).c_str()); } stage_mask &= ~entry.first; } if (stage_mask == 0) { return skip; } auto supported_flags = sync_utils::ExpandPipelineStages(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR, queue_flags); auto bad_flags = stage_mask & ~supported_flags; // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags for (size_t i = 0; i < sizeof(bad_flags) * 8; i++) { VkPipelineStageFlags2KHR bit = (1ULL << i) & bad_flags; if (bit) { const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, bit); skip |= LogError( objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.", loc.Message().c_str(), sync_utils::StringPipelineStageFlags(bit).c_str(), string_VkQueueFlags(queue_flags).c_str()); } } return skip; } bool CoreChecks::ValidatePipelineStageFeatureEnables(const LogObjectList &objects, const Location &loc, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; if (!enabled_features.synchronization2_features.synchronization2 && stage_mask == 0) { const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, 0); std::stringstream msg; msg << loc.Message() << " must not be 0 unless synchronization2 is enabled."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } auto disabled_stages = sync_utils::DisabledPipelineStages(enabled_features); auto bad_bits = stage_mask & disabled_stages; if (bad_bits == 0) { return skip; } for (size_t i = 0; i < sizeof(bad_bits) * 8; i++) { VkPipelineStageFlags2KHR bit = 1ULL << i; if (bit & bad_bits) { const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, bit); std::stringstream msg; msg << loc.Message() << " includes " << sync_utils::StringPipelineStageFlags(bit) << " when the device does not have " << sync_vuid_maps::kFeatureNameMap.at(bit) << " feature enabled."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } } return skip; } bool CoreChecks::ValidatePipelineStage(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; skip |= ValidateStageMasksAgainstQueueCapabilities(objects, loc, queue_flags, stage_mask); skip |= ValidatePipelineStageFeatureEnables(objects, loc, stage_mask); return skip; } bool 
CoreChecks::ValidateAccessMask(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags,
                                    VkAccessFlags2KHR access_mask, VkPipelineStageFlags2KHR stage_mask) const {
    bool skip = false;
    // Early out if all commands set
    if ((stage_mask & VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR) != 0) return skip;

    // or if only generic memory accesses are specified (or we got a 0 mask)
    access_mask &= ~(VK_ACCESS_2_MEMORY_READ_BIT_KHR | VK_ACCESS_2_MEMORY_WRITE_BIT_KHR);
    if (access_mask == 0) return skip;

    auto expanded_stages = sync_utils::ExpandPipelineStages(stage_mask, queue_flags);  // TODO:
    auto valid_accesses = sync_utils::CompatibleAccessMask(expanded_stages);
    auto bad_accesses = (access_mask & ~valid_accesses);
    if (bad_accesses == 0) {
        return skip;
    }
    for (size_t i = 0; i < sizeof(bad_accesses) * 8; i++) {
        VkAccessFlags2KHR bit = (1ULL << i);
        if (bad_accesses & bit) {
            const auto &vuid = sync_vuid_maps::GetBadAccessFlagsVUID(loc, bit);
            std::stringstream msg;
            msg << loc.Message() << " bit " << sync_utils::StringAccessFlags(bit) << " is not supported by stage mask ("
                << sync_utils::StringPipelineStageFlags(stage_mask) << ").";
            skip |= LogError(objects, vuid, "%s", msg.str().c_str());
        }
    }
    return skip;
}

bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount,
                                        size_t firstEventIndex, VkPipelineStageFlags2KHR sourceStageMask,
                                        EventToStageMap *localEventToStageMap) {
    bool skip = false;
    VkPipelineStageFlags2KHR stage_mask = 0;
    const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size());
    for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) {
        auto event = pCB->events[event_index];
        auto event_data = localEventToStageMap->find(event);
        if (event_data != localEventToStageMap->end()) {
            stage_mask |= event_data->second;
        } else {
            auto global_event_data = state_data->Get<EVENT_STATE>(event);
            if (!global_event_data) {
                skip |= state_data->LogError(event, kVUID_Core_DrawState_InvalidEvent,
                                             "%s cannot be waited on if it has never been set.",
                                             state_data->report_data->FormatHandle(event).c_str());
            } else {
                stage_mask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
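    // srcStageMask must equal the OR of all stageMask values used in the matching vkCmdSetEvent calls,
    // optionally with VK_PIPELINE_STAGE_HOST_BIT added for events that were set from the host.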
if (sourceStageMask != stage_mask && sourceStageMask != (stage_mask | VK_PIPELINE_STAGE_HOST_BIT)) { skip |= state_data->LogError( pCB->commandBuffer(), "VUID-vkCmdWaitEvents-srcStageMask-parameter", "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%" PRIx64 " which must be the bitwise OR of " "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with " "vkSetEvent but instead is 0x%" PRIx64 ".", sourceStageMask, stage_mask); } return skip; } bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); auto queue_flags = cb_state->GetQueueFlags(); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdWaitEvents); skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask); skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask); skip |= ValidateCmd(cb_state.get(), CMD_WAITEVENTS); skip |= ValidateBarriers(loc.dot(Field::pDependencyInfo), cb_state.get(), srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); for (uint32_t i = 0; i < bufferMemoryBarrierCount; ++i) { if (pBufferMemoryBarriers[i].srcQueueFamilyIndex != pBufferMemoryBarriers[i].dstQueueFamilyIndex) { skip |= LogError(commandBuffer, "VUID-vkCmdWaitEvents-srcQueueFamilyIndex-02803", "vkCmdWaitEvents(): pBufferMemoryBarriers[%" PRIu32 "] has different srcQueueFamilyIndex (%" PRIu32 ") and dstQueueFamilyIndex (%" PRIu32 ").", i, pBufferMemoryBarriers[i].srcQueueFamilyIndex, pBufferMemoryBarriers[i].dstQueueFamilyIndex); } } for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) { if (pImageMemoryBarriers[i].srcQueueFamilyIndex != pImageMemoryBarriers[i].dstQueueFamilyIndex) { skip |= LogError(commandBuffer, "VUID-vkCmdWaitEvents-srcQueueFamilyIndex-02803", "vkCmdWaitEvents(): pImageMemoryBarriers[%" PRIu32 "] has different srcQueueFamilyIndex (%" PRIu32 ") and dstQueueFamilyIndex (%" PRIu32 ").", i, pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex); } } return skip; } bool CoreChecks::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfos) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; if (!enabled_features.synchronization2_features.synchronization2) { skip |= LogError(commandBuffer, "VUID-vkCmdWaitEvents2KHR-synchronization2-03836", "vkCmdWaitEvents2KHR(): Synchronization2 feature is not enabled"); } for (uint32_t i = 0; (i < eventCount) && !skip; i++) { LogObjectList objects(commandBuffer); objects.add(pEvents[i]); Location loc(Func::vkCmdWaitEvents2KHR, Field::pDependencyInfos, i); if (pDependencyInfos[i].dependencyFlags != 0) { skip |= LogError(objects, "VUID-vkCmdWaitEvents2KHR-dependencyFlags-03844", "%s (%s) must be 0.", loc.dot(Field::dependencyFlags).Message().c_str(), 
string_VkDependencyFlags(pDependencyInfos[i].dependencyFlags).c_str());
        }
        skip |= ValidateDependencyInfo(objects, loc, cb_state.get(), &pDependencyInfos[i]);
    }
    skip |= ValidateCmd(cb_state.get(), CMD_WAITEVENTS);
    return skip;
}

void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                            VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                            uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                            uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    // The StateTracker call below will add to the events vector.
    auto first_event_index = cb_state->events.size();
    StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                             memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    auto event_added_count = cb_state->events.size() - first_event_index;

    const auto cb_state_const = std::static_pointer_cast<const CMD_BUFFER_STATE>(cb_state);
    cb_state->eventUpdates.emplace_back(
        [cb_state_const, event_added_count, first_event_index, sourceStageMask](
            const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
            if (!do_validate) return false;
            return ValidateEventStageMask(device_data, cb_state_const.get(), event_added_count, first_event_index,
                                          sourceStageMask, localEventToStageMap);
        });
    TransitionImageLayouts(cb_state.get(), imageMemoryBarrierCount, pImageMemoryBarriers);
}

void CoreChecks::PreCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                const VkDependencyInfoKHR *pDependencyInfos) {
    auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    // The StateTracker call below will add to the events vector.
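    // Snapshot the event count before the state tracker appends, so the deferred lambda below only
    // validates the events added by this vkCmdWaitEvents2KHR call.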
auto first_event_index = cb_state->events.size(); StateTracker::PreCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos); auto event_added_count = cb_state->events.size() - first_event_index; const auto cb_state_const = std::static_pointer_cast<const CMD_BUFFER_STATE>(cb_state); for (uint32_t i = 0; i < eventCount; i++) { const auto &dep_info = pDependencyInfos[i]; auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info); cb_state->eventUpdates.emplace_back( [cb_state_const, event_added_count, first_event_index, stage_masks]( const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) { if (!do_validate) return false; return ValidateEventStageMask(device_data, cb_state_const.get(), event_added_count, first_event_index, stage_masks.src, localEventToStageMap); }); TransitionImageLayouts(cb_state.get(), dep_info.imageMemoryBarrierCount, dep_info.pImageMemoryBarriers); } } void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); RecordBarriers(Func::vkCmdWaitEvents, cb_state.get(), bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } void CoreChecks::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfos) { auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); for (uint32_t i = 0; i < eventCount; i++) { const auto &dep_info = pDependencyInfos[i]; RecordBarriers(Func::vkCmdWaitEvents2KHR, cb_state.get(), dep_info); } } bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); auto queue_flags = cb_state->GetQueueFlags(); Location loc(Func::vkCmdPipelineBarrier); skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask); skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask); skip |= ValidateCmd(cb_state.get(), CMD_PIPELINEBARRIER); if (cb_state->activeRenderPass && !cb_state->activeRenderPass->use_dynamic_rendering) { skip |= ValidateRenderPassPipelineBarriers(loc, cb_state.get(), srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); if (skip) return true; // Early return to avoid redundant errors from below calls } else { if (dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) { skip = LogError(objects, "VUID-vkCmdPipelineBarrier-dependencyFlags-01186", "%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance", 
loc.dot(Field::dependencyFlags).Message().c_str()); } } skip |= ValidateBarriers(loc, cb_state.get(), srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); return skip; } bool CoreChecks::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdPipelineBarrier2KHR, Field::pDependencyInfo); if (!enabled_features.synchronization2_features.synchronization2) { skip |= LogError(commandBuffer, "VUID-vkCmdPipelineBarrier2KHR-synchronization2-03848", "vkCmdPipelineBarrier2KHR(): Synchronization2 feature is not enabled"); } skip |= ValidateCmd(cb_state.get(), CMD_PIPELINEBARRIER); if (cb_state->activeRenderPass) { skip |= ValidateRenderPassPipelineBarriers(loc, cb_state.get(), pDependencyInfo); if (skip) return true; // Early return to avoid redundant errors from below calls } else { if (pDependencyInfo->dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) { skip = LogError(objects, "VUID-vkCmdPipelineBarrier2KHR-dependencyFlags-01186", "%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance", loc.dot(Field::dependencyFlags).Message().c_str()); } } skip |= ValidateDependencyInfo(objects, loc, cb_state.get(), pDependencyInfo); return skip; } void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); RecordBarriers(Func::vkCmdPipelineBarrier, cb_state.get(), bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); TransitionImageLayouts(cb_state.get(), imageMemoryBarrierCount, pImageMemoryBarriers); StateTracker::PreCallRecordCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } void CoreChecks::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) { auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); RecordBarriers(Func::vkCmdPipelineBarrier2KHR, cb_state.get(), *pDependencyInfo); TransitionImageLayouts(cb_state.get(), pDependencyInfo->imageMemoryBarrierCount, pDependencyInfo->pImageMemoryBarriers); StateTracker::PreCallRecordCmdPipelineBarrier2KHR(commandBuffer, pDependencyInfo); } bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, uint32_t index, CMD_TYPE cmd, const ValidateBeginQueryVuids *vuids) const { bool skip = false; const auto query_pool_state = Get<QUERY_POOL_STATE>(query_obj.pool); const auto &query_pool_ci = query_pool_state->createInfo; const char *cmd_name = CommandTypeString(cmd); if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBeginQuery-queryType-02804", "%s: The querypool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name); } // 
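// Two active queries of the same queryType and index in one command buffer are invalid; the loop
// below walks the command buffer's active query set to detect that case.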
// Check for nested queries
    if (cb_state->activeQueries.size()) {
        for (const auto &a_query : cb_state->activeQueries) {
            auto active_query_pool_state = Get<QUERY_POOL_STATE>(a_query.pool);
            if (active_query_pool_state->createInfo.queryType == query_pool_ci.queryType && a_query.index == index) {
                LogObjectList obj_list(cb_state->commandBuffer());
                obj_list.add(query_obj.pool);
                obj_list.add(a_query.pool);
                skip |= LogError(obj_list, vuids->vuid_dup_query_type,
                                 "%s: Within the same command buffer %s, query %d from pool %s has the same queryType as "
                                 "active query %d from pool %s.",
                                 cmd_name, report_data->FormatHandle(cb_state->commandBuffer()).c_str(), query_obj.index,
                                 report_data->FormatHandle(query_obj.pool).c_str(), a_query.index,
                                 report_data->FormatHandle(a_query.pool).c_str());
            }
        }
    }

    // There are tighter queue constraints to test for certain query pools
    if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
        skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_feedback);
        if (!phys_dev_ext_props.transform_feedback_props.transformFeedbackQueries) {
            const char *vuid = cmd == CMD_BEGINQUERYINDEXEDEXT ? "VUID-vkCmdBeginQueryIndexedEXT-queryType-02341"
                                                               : "VUID-vkCmdBeginQuery-queryType-02328";
            skip |= LogError(cb_state->commandBuffer(), vuid,
                             "%s: queryPool was created with queryType VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT, but "
                             "VkPhysicalDeviceTransformFeedbackPropertiesEXT::transformFeedbackQueries is not supported.",
                             cmd_name);
        }
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
        skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_occlusion);
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        if (!cb_state->performance_lock_acquired) {
            skip |= LogError(cb_state->commandBuffer(), vuids->vuid_profile_lock,
                             "%s: profiling lock must be held before vkBeginCommandBuffer is called on "
                             "a command buffer where performance queries are recorded.",
                             cmd_name);
        }
        if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) {
            skip |= LogError(cb_state->commandBuffer(), vuids->vuid_scope_not_first,
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded "
                             "command in the command buffer.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
        if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
            skip |= LogError(cb_state->commandBuffer(), vuids->vuid_scope_in_rp,
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
        query_pool_ci.queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR) {
        const char *vuid = cmd == CMD_BEGINQUERYINDEXEDEXT ? "VUID-vkCmdBeginQueryIndexedEXT-queryType-04728"
                                                           : "VUID-vkCmdBeginQuery-queryType-04728";
        skip |= LogError(cb_state->commandBuffer(), vuid, "%s: QueryPool was created with queryType %s.", cmd_name,
                         string_VkQueryType(query_pool_ci.queryType));
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) {
        const char *vuid = cmd == CMD_BEGINQUERYINDEXEDEXT ? "VUID-vkCmdBeginQueryIndexedEXT-queryType-04729"
                                                           : "VUID-vkCmdBeginQuery-queryType-04729";
        skip |= LogError(cb_state->commandBuffer(), vuid,
                         "%s: QueryPool was created with queryType VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV.",
                         cmd_name);
    }

    skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);

    if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
        if (!enabled_features.core.occlusionQueryPrecise) {
            skip |= LogError(cb_state->commandBuffer(), vuids->vuid_precise,
                             "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
                             cmd_name);
        }
        if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
            skip |= LogError(cb_state->commandBuffer(), vuids->vuid_precise,
                             "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION.",
                             cmd_name);
        }
    }

    if (query_obj.query >= query_pool_ci.queryCount) {
        skip |= LogError(cb_state->commandBuffer(), vuids->vuid_query_count,
                         "%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name,
                         query_obj.query, query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
    }

    if (cb_state->unprotected == false) {
        skip |= LogError(cb_state->commandBuffer(), vuids->vuid_protected_cb,
                         "%s: command can't be used in protected command buffers.", cmd_name);
    }

    skip |= ValidateCmd(cb_state, cmd);
    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
                                              VkFlags flags) const {
    if (disabled[query_validation]) return false;
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    QueryObject query_obj(queryPool, slot);
    struct BeginQueryVuids : ValidateBeginQueryVuids {
        BeginQueryVuids() : ValidateBeginQueryVuids() {
            vuid_queue_flags = "VUID-vkCmdBeginQuery-commandBuffer-cmdpool";
            vuid_queue_feedback = "VUID-vkCmdBeginQuery-queryType-02327";
            vuid_queue_occlusion = "VUID-vkCmdBeginQuery-queryType-00803";
            vuid_precise = "VUID-vkCmdBeginQuery-queryType-00800";
            vuid_query_count = "VUID-vkCmdBeginQuery-query-00802";
            vuid_profile_lock = "VUID-vkCmdBeginQuery-queryPool-03223";
            vuid_scope_not_first = "VUID-vkCmdBeginQuery-queryPool-03224";
            vuid_scope_in_rp = "VUID-vkCmdBeginQuery-queryPool-03225";
            vuid_dup_query_type = "VUID-vkCmdBeginQuery-queryPool-01922";
            vuid_protected_cb = "VUID-vkCmdBeginQuery-commandBuffer-01885";
        }
    };
    BeginQueryVuids vuids;
    return ValidateBeginQuery(cb_state.get(), query_obj, flags, 0, CMD_BEGINQUERY, &vuids);
}

static QueryState GetLocalQueryState(const QueryMap *localQueryToStateMap, VkQueryPool queryPool, uint32_t queryIndex,
                                     uint32_t perfPass) {
    QueryObject query = QueryObject(QueryObject(queryPool, queryIndex), perfPass);

    auto iter = localQueryToStateMap->find(query);
    if (iter != localQueryToStateMap->end()) return iter->second;

    return QUERYSTATE_UNKNOWN;
}

bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
                                    QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool,
                                    uint32_t perfPass, QueryMap *localQueryToStateMap) {
    bool skip = false;
    const auto query_pool_state = state_data->Get<QUERY_POOL_STATE>(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;

    QueryState state = GetLocalQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
    // If reset was in another command buffer, check the global map
    if (state == QUERYSTATE_UNKNOWN) {
        state = query_pool_state->GetQueryState(query_obj.query, perfPass);
    }
    // Performance queries have limitations on when they can be reset.
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && state == QUERYSTATE_UNKNOWN &&
        perfPass >= query_pool_state->n_performance_passes) {
        // If the pass is invalid, assume RESET state, another error
        // will be raised in ValidatePerformanceQuery().
        state = QUERYSTATE_RESET;
    }

    if (state != QUERYSTATE_RESET) {
        skip |= state_data->LogError(commandBuffer, kVUID_Core_DrawState_QueryNotReset,
                                     "%s: %s and query %" PRIu32
                                     ": query not reset. "
                                     "After query pool creation, each query must be reset before it is used. "
                                     "Queries must also be reset between uses.",
                                     func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(),
                                     query_obj.query);
    }

    return skip;
}

bool CoreChecks::ValidatePerformanceQuery(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
                                          QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool,
                                          uint32_t perfPass, QueryMap *localQueryToStateMap) {
    const auto query_pool_state = state_data->Get<QUERY_POOL_STATE>(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;

    if (query_pool_ci.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return false;

    const auto cb_state = state_data->Get<CMD_BUFFER_STATE>(commandBuffer);
    bool skip = false;

    if (perfPass >= query_pool_state->n_performance_passes) {
        skip |= state_data->LogError(commandBuffer, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221",
                                     "Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", perfPass,
                                     query_pool_state->n_performance_passes,
                                     state_data->report_data->FormatHandle(query_obj.pool).c_str());
    }

    if (!cb_state->performance_lock_acquired || cb_state->performance_lock_released) {
        skip |= state_data->LogError(commandBuffer, "VUID-vkQueueSubmit-pCommandBuffers-03220",
                                     "Command buffer %s was submitted and contains a performance query but the "
                                     "profiling lock was not held continuously throughout the recording of commands.",
                                     state_data->report_data->FormatHandle(commandBuffer).c_str());
    }

    QueryState command_buffer_state = GetLocalQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
    if (command_buffer_state == QUERYSTATE_RESET) {
        skip |= state_data->LogError(
            commandBuffer, query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-None-02863" : "VUID-vkCmdBeginQuery-None-02863",
            "VkQuery begin command recorded in a command buffer that, either directly or "
            "through secondary command buffers, also contains a vkCmdResetQueryPool command "
            "affecting the same query.");
    }

    if (firstPerfQueryPool != VK_NULL_HANDLE) {
        if (firstPerfQueryPool != query_obj.pool &&
            !state_data->enabled_features.performance_query_features.performanceCounterMultipleQueryPools) {
            skip |= state_data->LogError(
                commandBuffer,
                query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226",
                "Command buffer %s contains more than one performance query pool but "
                "performanceCounterMultipleQueryPools is not enabled.",
                state_data->report_data->FormatHandle(commandBuffer).c_str());
        }
    } else {
        firstPerfQueryPool = query_obj.pool;
    }

    return skip;
}

void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) {
    auto cb_state = Get<CMD_BUFFER_STATE>(command_buffer);

    // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
    cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data,
                                                                               bool do_validate, VkQueryPool &firstPerfQueryPool,
                                                                               uint32_t perfPass,
                                                                               QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        skip |= ValidatePerformanceQuery(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
                                         localQueryToStateMap);
        skip |= VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
                                   localQueryToStateMap);
        return skip;
    });
}

void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    if (disabled[query_validation]) return;
    QueryObject query_obj = {queryPool, slot};
    EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQuery()");
}

void CoreChecks::EnqueueVerifyEndQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) {
    auto cb_state = Get<CMD_BUFFER_STATE>(command_buffer);

    // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
    cb_state->queryUpdates.emplace_back([command_buffer, query_obj](const ValidationStateTracker *device_data, bool do_validate,
                                                                    VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                                                    QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        const auto cb_state = device_data->Get<CMD_BUFFER_STATE>(command_buffer);
        const auto query_pool_state = device_data->Get<QUERY_POOL_STATE>(query_obj.pool);
        if (query_pool_state->has_perf_scope_command_buffer && (cb_state->commandCount - 1) != query_obj.endCommandIndex) {
            skip |= device_data->LogError(command_buffer, "VUID-vkCmdEndQuery-queryPool-03227",
                                          "vkCmdEndQuery: Query pool %s was created with a counter of scope "
                                          "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last "
                                          "command in the command buffer %s.",
                                          device_data->report_data->FormatHandle(query_obj.pool).c_str(),
                                          device_data->report_data->FormatHandle(command_buffer).c_str());
        }
        return skip;
    });
}

bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, uint32_t index,
                                     CMD_TYPE cmd, const ValidateEndQueryVuids *vuids) const {
    bool skip = false;
    const char *cmd_name = CommandTypeString(cmd);
    if (!cb_state->activeQueries.count(query_obj)) {
        skip |= LogError(cb_state->commandBuffer(), vuids->vuid_active_queries,
                         "%s: Ending a query before it was started: %s, index %d.", cmd_name,
                         report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
    }
    const auto query_pool_state = Get<QUERY_POOL_STATE>(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
            skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQuery-queryPool-03228",
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
    }
    skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
    skip |= ValidateCmd(cb_state, cmd);

    if (cb_state->unprotected == false) {
        skip |= LogError(cb_state->commandBuffer(), vuids->vuid_protected_cb,
                         "%s: command can't be used in protected command buffers.", cmd_name);
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const {
    if (disabled[query_validation]) return false;
    bool skip = false;
    QueryObject query_obj = {queryPool, slot};
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);

    const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool);
    if (query_pool_state) {
        const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
        // Only continue validating if the slot is even within range
        if (slot >= available_query_count) {
            skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQuery-query-00810",
                             "vkCmdEndQuery(): query index (%u) is greater than or equal to the queryPool size (%u).", slot,
                             available_query_count);
        } else {
            struct EndQueryVuids : ValidateEndQueryVuids {
                EndQueryVuids() : ValidateEndQueryVuids() {
                    vuid_queue_flags = "VUID-vkCmdEndQuery-commandBuffer-cmdpool";
                    vuid_active_queries = "VUID-vkCmdEndQuery-None-01923";
                    vuid_protected_cb = "VUID-vkCmdEndQuery-commandBuffer-01886";
                }
            };
            EndQueryVuids vuids;
            skip |= ValidateCmdEndQuery(cb_state.get(), query_obj, 0, CMD_ENDQUERY, &vuids);
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    if (disabled[query_validation]) return;
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    QueryObject query_obj = {queryPool, slot};
    query_obj.endCommandIndex = cb_state->commandCount - 1;
    EnqueueVerifyEndQuery(commandBuffer, query_obj);
}

bool CoreChecks::ValidateQueryPoolIndex(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *func_name,
                                        const char *first_vuid, const char *sum_vuid) const {
    bool skip = false;
    const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool);
    if (query_pool_state) {
        const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
        if (firstQuery >= available_query_count) {
            skip |= LogError(queryPool, first_vuid,
                             "%s: In Query %s the firstQuery (%u) is greater than or equal to the queryPool size (%u).",
                             func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, available_query_count);
        }
        if ((firstQuery + queryCount) > available_query_count) {
            skip |= LogError(queryPool, sum_vuid,
                             "%s: In Query %s the sum of firstQuery (%u) + queryCount (%u) is greater than the queryPool size (%u).",
                             func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, queryCount,
                             available_query_count);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                  uint32_t queryCount) const {
    if (disabled[query_validation]) return false;
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);

    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_RESETQUERYPOOL);
    skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdResetQueryPool()",
"VUID-vkCmdResetQueryPool-firstQuery-00796", "VUID-vkCmdResetQueryPool-firstQuery-00797"); return skip; } static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) { switch (state) { case QUERYSTATE_UNKNOWN: return QUERYRESULT_UNKNOWN; case QUERYSTATE_RESET: case QUERYSTATE_RUNNING: if (flags & VK_QUERY_RESULT_WAIT_BIT) { return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING); } else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) { return QUERYRESULT_SOME_DATA; } else { return QUERYRESULT_NO_DATA; } case QUERYSTATE_ENDED: if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) { return QUERYRESULT_SOME_DATA; } else { return QUERYRESULT_UNKNOWN; } case QUERYSTATE_AVAILABLE: return QUERYRESULT_SOME_DATA; } assert(false); return QUERYRESULT_UNKNOWN; } bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, uint32_t perfPass, VkQueryResultFlags flags, QueryMap *localQueryToStateMap) { bool skip = false; for (uint32_t i = 0; i < queryCount; i++) { QueryState state = GetLocalQueryState(localQueryToStateMap, queryPool, firstQuery + i, perfPass); QueryResultType result_type = GetQueryResultType(state, flags); if (result_type != QUERYRESULT_SOME_DATA && result_type != QUERYRESULT_UNKNOWN) { skip |= state_data->LogError( commandBuffer, kVUID_Core_DrawState_InvalidQuery, "vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s", state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type)); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) const { if (disabled[query_validation]) return false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); const auto dst_buff_state = Get<BUFFER_STATE>(dstBuffer); assert(cb_state); assert(dst_buff_state); bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state.get(), "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826"); skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823", stride, "dstOffset", dstOffset, flags); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buff_state.get(), VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmd(cb_state.get(), CMD_COPYQUERYPOOLRESULTS); skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-firstQuery-00820", "VUID-vkCmdCopyQueryPoolResults-firstQuery-00821"); if (dstOffset >= dst_buff_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819", "vkCmdCopyQueryPoolResults() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset, dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer()).c_str()); } else if (dstOffset + (queryCount * stride) > 
dst_buff_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824", "vkCmdCopyQueryPoolResults() storage required (0x%" PRIxLEAST64 ") equal to dstOffset + (queryCount * stride) is greater than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset + (queryCount * stride), dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer()).c_str()); } const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); if (query_pool_state) { if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) { skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state.get(), firstQuery, queryCount, flags); if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-03232", "vkCmdCopyQueryPoolResults called with query pool %s but " "VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies " "is not set.", report_data->FormatHandle(queryPool).c_str()); } } if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && ((flags & VK_QUERY_RESULT_PARTIAL_BIT) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-00827", "vkCmdCopyQueryPoolResults() query pool %s was created with VK_QUERY_TYPE_TIMESTAMP so flags must not " "contain VK_QUERY_RESULT_PARTIAL_BIT.", report_data->FormatHandle(queryPool).c_str()); } if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) { skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-02734", "vkCmdCopyQueryPoolResults() called but QueryPool %s was created with queryType " "VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL.", report_data->FormatHandle(queryPool).c_str()); } if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_VIDEO_ENCODE_BITSTREAM_BUFFER_RANGE_KHR || query_pool_state->createInfo.queryType == VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR) { skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-04812", "vkCmdCopyQueryPoolResults(): called but QueryPool %s was created with queryType " "%s.", report_data->FormatHandle(queryPool).c_str(), string_VkQueryType(query_pool_state->createInfo.queryType)); } } return skip; } void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { if (disabled[query_validation]) return; auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); cb_state->queryUpdates.emplace_back([commandBuffer, queryPool, firstQuery, queryCount, flags]( const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; return ValidateCopyQueryPoolResults(device_data, commandBuffer, queryPool, firstQuery, queryCount, perfPass, flags, localQueryToStateMap); }); } bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *pValues) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); skip |= ValidateCmd(cb_state.get(), CMD_PUSHCONSTANTS); // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command // stageFlags 
    // argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
    if (!skip) {
        auto layout_state = Get<PIPELINE_LAYOUT_STATE>(layout);
        const auto &ranges = *layout_state->push_constant_ranges;
        VkShaderStageFlags found_stages = 0;
        for (const auto &range : ranges) {
            if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
                if (matching_stages != range.stageFlags) {
                    skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01796",
                                     "vkCmdPushConstants(): stageFlags (%s), offset (%" PRIu32 "), and size (%" PRIu32
                                     "), must contain all stages in overlapping VkPushConstantRange stageFlags (%s), "
                                     "offset (%" PRIu32 "), and size (%" PRIu32 ") in %s.",
                                     string_VkShaderStageFlags(stageFlags).c_str(), offset, size,
                                     string_VkShaderStageFlags(range.stageFlags).c_str(), range.offset, range.size,
                                     report_data->FormatHandle(layout).c_str());
                }
                // Accumulate all stages we've found
                found_stages = matching_stages | found_stages;
            }
        }
        if (found_stages != stageFlags) {
            uint32_t missing_stages = ~found_stages & stageFlags;
            skip |= LogError(
                commandBuffer, "VUID-vkCmdPushConstants-offset-01795",
                "vkCmdPushConstants(): %s, VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain %s.",
                string_VkShaderStageFlags(stageFlags).c_str(), report_data->FormatHandle(layout).c_str(), offset, size,
                string_VkShaderStageFlags(missing_stages).c_str());
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                  VkQueryPool queryPool, uint32_t slot) const {
    if (disabled[query_validation]) return false;
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_WRITETIMESTAMP);

    const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool);
    if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) {
        skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-queryPool-01416",
                         "vkCmdWriteTimestamp(): Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.",
                         report_data->FormatHandle(queryPool).c_str());
    }

    const uint32_t timestamp_valid_bits =
        physical_device_state->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits;
    if (timestamp_valid_bits == 0) {
        skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-timestampValidBits-00829",
                         "vkCmdWriteTimestamp(): Query Pool %s has a timestampValidBits value of zero.",
                         report_data->FormatHandle(queryPool).c_str());
    }

    if ((query_pool_state != nullptr) && (slot >= query_pool_state->createInfo.queryCount)) {
        skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-query-04904",
                         "vkCmdWriteTimestamp(): query (%" PRIu32 ") is not lower than the number of queries (%" PRIu32
                         ") in Query pool %s.",
                         slot, query_pool_state->createInfo.queryCount, report_data->FormatHandle(queryPool).c_str());
    }

    return skip;
}

bool CoreChecks::PreCallValidateCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage,
                                                      VkQueryPool queryPool, uint32_t slot) const {
    if (disabled[query_validation]) return false;
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    bool skip = false;
    if (!enabled_features.synchronization2_features.synchronization2) {
        skip |= LogError(commandBuffer, "VUID-vkCmdWriteTimestamp2KHR-synchronization2-03858",
"vkCmdWriteTimestamp2KHR(): Synchronization2 feature is not enabled"); } skip |= ValidateCmd(cb_state.get(), CMD_WRITETIMESTAMP); Location loc(Func::vkCmdWriteTimestamp2KHR, Field::stage); if ((stage & (stage - 1)) != 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-stage-03859", "%s (%s) must only set a single pipeline stage.", loc.Message().c_str(), string_VkPipelineStageFlags2KHR(stage).c_str()); } skip |= ValidatePipelineStage(LogObjectList(cb_state->commandBuffer()), loc, cb_state->GetQueueFlags(), stage); loc.field = Field::queryPool; const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); if (query_pool_state) { if (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-queryPool-03861", "%s Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.", loc.Message().c_str(), report_data->FormatHandle(queryPool).c_str()); } if (slot >= query_pool_state->createInfo.queryCount) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-query-04903", "vkCmdWriteTimestamp2KHR(): query (%" PRIu32 ") is not lower than the number of queries (%" PRIu32 ") in Query pool %s.", slot, query_pool_state->createInfo.queryCount, report_data->FormatHandle(queryPool).c_str()); } } const uint32_t timestampValidBits = physical_device_state->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits; if (timestampValidBits == 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-timestampValidBits-03863", "%s Query Pool %s has a timestampValidBits value of zero.", loc.Message().c_str(), report_data->FormatHandle(queryPool).c_str()); } return skip; } void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { if (disabled[query_validation]) return; // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall... auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); QueryObject query = {queryPool, slot}; const char *func_name = "vkCmdWriteTimestamp()"; cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap); }); } void CoreChecks::PreCallRecordCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage, VkQueryPool queryPool, uint32_t slot) { if (disabled[query_validation]) return; // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall... 
    auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    QueryObject query = {queryPool, slot};
    const char *func_name = "vkCmdWriteTimestamp2KHR()";
    cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
                                                                          bool do_validate, VkQueryPool &firstPerfQueryPool,
                                                                          uint32_t perfPass, QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
                                  localQueryToStateMap);
    });
}

void CoreChecks::PreCallRecordCmdWriteAccelerationStructuresPropertiesKHR(
    VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount,
    const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool,
    uint32_t firstQuery) {
    if (disabled[query_validation]) return;
    // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
    auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    const char *func_name = "vkCmdWriteAccelerationStructuresPropertiesKHR()";
    cb_state->queryUpdates.emplace_back([accelerationStructureCount, commandBuffer, firstQuery, func_name, queryPool](
                                            const ValidationStateTracker *device_data, bool do_validate,
                                            VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                            QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        for (uint32_t i = 0; i < accelerationStructureCount; i++) {
            QueryObject query = {{queryPool, firstQuery + i}, perfPass};
            skip |= VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
                                       localQueryToStateMap);
        }
        return skip;
    });
}

bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2 *attachments, const VkFramebufferCreateInfo *fbci,
                            VkImageUsageFlagBits usage_flag, const char *error_code) const {
    bool skip = false;

    if (attachments) {
        for (uint32_t attach = 0; attach < count; attach++) {
            if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
                // Attachment counts are verified elsewhere, but prevent an invalid access
                if (attachments[attach].attachment < fbci->attachmentCount) {
                    if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
                        const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                        auto view_state = Get<IMAGE_VIEW_STATE>(*image_view);
                        if (view_state) {
                            const auto &ici = view_state->image_state->createInfo;
                            auto creation_usage = ici.usage;
                            const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(ici.pNext);
                            if (stencil_usage_info) {
                                creation_usage |= stencil_usage_info->stencilUsage;
                            }
                            if ((creation_usage & usage_flag) == 0) {
                                skip |= LogError(device, error_code,
                                                 "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s).",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                            }
                        }
                    } else {
                        const VkFramebufferAttachmentsCreateInfo *fbaci =
                            LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(fbci->pNext);
                        if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
                            fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
                            uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
                            if ((image_usage & usage_flag) == 0) {
                                skip |= LogError(device, error_code,
                                                 "vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the "
                                                 "image's IMAGE_USAGE flags (%s).",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                            }
                        }
                    }
                }
            }
        }
    }
    return skip;
}

bool
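// ValidateFramebufferCreateInfo: cross-checks VkFramebufferCreateInfo against the referenced render
// pass: attachment count, formats, sample counts, usage, dimensions, layer counts, and view types.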
CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const { bool skip = false; const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info = LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(pCreateInfo->pNext); if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) { if (!enabled_features.core12.imagelessFramebuffer) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03189", "vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, " "but the imagelessFramebuffer feature is not enabled."); } if (framebuffer_attachments_create_info == nullptr) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03190", "vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, " "but no instance of VkFramebufferAttachmentsCreateInfo is present in the pNext chain."); } else { if (framebuffer_attachments_create_info->attachmentImageInfoCount != 0 && framebuffer_attachments_create_info->attachmentImageInfoCount != pCreateInfo->attachmentCount) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03191", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but " "VkFramebufferAttachmentsCreateInfo attachmentImageInfoCount is %u.", pCreateInfo->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount); } } } if (framebuffer_attachments_create_info) { for (uint32_t i = 0; i < framebuffer_attachments_create_info->attachmentImageInfoCount; ++i) { if (framebuffer_attachments_create_info->pAttachmentImageInfos[i].pNext != nullptr) { skip |= LogError(device, "VUID-VkFramebufferAttachmentImageInfo-pNext-pNext", "vkCreateFramebuffer(): VkFramebufferAttachmentsCreateInfo[%" PRIu32 "].pNext is not NULL.", i); } } } auto rp_state = Get<RENDER_PASS_STATE>(pCreateInfo->renderPass); if (rp_state) { const VkRenderPassCreateInfo2 *rpci = rp_state->createInfo.ptr(); if (rpci->attachmentCount != pCreateInfo->attachmentCount) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-attachmentCount-00876", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount " "of %u of %s being used to create Framebuffer.", pCreateInfo->attachmentCount, rpci->attachmentCount, report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } else { // attachmentCounts match, so make sure corresponding attachment details line up if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { const VkImageView *image_views = pCreateInfo->pAttachments; for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { auto view_state = Get<IMAGE_VIEW_STATE>(image_views[i]); if (view_state == nullptr) { skip |= LogError( image_views[i], "VUID-VkFramebufferCreateInfo-flags-02778", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i); } else { auto &ivci = view_state->create_info; auto &subresource_range = view_state->normalized_subresource_range; if (ivci.format != rpci->pAttachments[i].format) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00880", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not " "match the format of %s used by the corresponding attachment for %s.", i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format), report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } const auto &ici = 
view_state->image_state->createInfo;
if (ici.samples != rpci->pAttachments[i].samples) {
    skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00881",
                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
                     "match the %s samples used by the corresponding attachment for %s.",
                     i, string_VkSampleCountFlagBits(ici.samples),
                     string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                     report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}

// Verify that image memory is valid
auto image_data = Get<IMAGE_STATE>(ivci.image);
skip |= ValidateMemoryIsBoundToImage(image_data.get(), "vkCreateFramebuffer()",
                                     kVUID_Core_Bound_Resource_FreedMemoryAccess);

// Verify that view only has a single mip level
if (subresource_range.levelCount != 1) {
    skip |= LogError(device, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
                     "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                     i, subresource_range.levelCount);
}
// Dimensions of the mip level the view selects; these must cover the framebuffer
const uint32_t mip_level = subresource_range.baseMipLevel;
uint32_t mip_width = max(1u, ici.extent.width >> mip_level);
uint32_t mip_height = max(1u, ici.extent.height >> mip_level);
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
    const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
    // MostSignificantBit() returns -1 when the view mask is 0 (multiview disabled),
    // so keep the result signed to avoid wrapping in the comparisons below
    const int highest_view_bit = MostSignificantBit(subpass.viewMask);
    for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
        if (subpass.pInputAttachments[k].attachment == i) {
            used_as_input_color_resolve_depth_stencil_attachment = true;
            break;
        }
    }
    for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) {
        if (subpass.pColorAttachments[k].attachment == i ||
            (subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) {
            used_as_input_color_resolve_depth_stencil_attachment = true;
            break;
        }
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) {
        used_as_input_color_resolve_depth_stencil_attachment = true;
    }
    if (used_as_input_color_resolve_depth_stencil_attachment) {
        if (static_cast<int32_t>(subresource_range.layerCount) <= highest_view_bit) {
            skip |= LogError(device, "VUID-VkFramebufferCreateInfo-renderPass-04536",
                             "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
                             "less than or equal to the highest bit in the view mask (%d) of subpass %u.",
                             i, subresource_range.layerCount, highest_view_bit, j);
        }
    }
    if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
        const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment;
        fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext);
        if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment &&
            fsr_attachment->pFragmentShadingRateAttachment->attachment == i) {
            used_as_fragment_shading_rate_attachment = true;
            // Each shading rate texel covers shadingRateAttachmentTexelSize pixels, so the
            // selected mip level must be large enough to cover the whole framebuffer
            if ((mip_width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) {
                skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04539",
                                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u is used as a "
                                 "fragment shading rate attachment in subpass %u, but the product of its width (%u) and the "
                                 "specified shading rate texel width (%u) is smaller than the "
                                 "corresponding framebuffer width (%u).",
                                 i, subresource_range.baseMipLevel, j, mip_width,
                                 fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width);
            }
            if ((mip_height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) {
                skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04540",
                                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u is used as a "
                                 "fragment shading rate attachment in subpass %u, but the product of its height (%u) and the "
                                 "specified shading rate texel height (%u) is smaller than the corresponding "
                                 "framebuffer height (%u).",
                                 i, subresource_range.baseMipLevel, j, mip_height,
                                 fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height);
            }
            if (highest_view_bit > 0) {
                fsr_non_zero_viewmasks = true;
            }
            if (static_cast<int32_t>(subresource_range.layerCount) <= highest_view_bit) {
                skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04537",
                                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
                                 "less than or equal to the highest bit in the view mask (%d) of subpass %u.",
                                 i, subresource_range.layerCount, highest_view_bit, j);
            }
        }
    }
}
if (enabled_features.fragment_density_map_features.fragmentDensityMap) {
    const VkRenderPassFragmentDensityMapCreateInfoEXT *fdm_attachment;
    fdm_attachment = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rpci->pNext);
    if (fdm_attachment && fdm_attachment->fragmentDensityMapAttachment.attachment == i) {
        // The density map must have at least ceil(framebuffer extent / maxFragmentDensityTexelSize) texels
        uint32_t ceiling_width = static_cast<uint32_t>(ceil(
            static_cast<float>(pCreateInfo->width) /
            std::max(static_cast<float>(phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width),
                     1.0f)));
        if (mip_width < ceiling_width) {
            skip |= LogError(device, "VUID-VkFramebufferCreateInfo-pAttachments-02555",
                             "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width (%u) "
                             "smaller than the ceiling of framebuffer width / maxFragmentDensityTexelSize.width (%u).",
                             i, subresource_range.baseMipLevel, mip_width, ceiling_width);
        }
        uint32_t ceiling_height = static_cast<uint32_t>(ceil(
            static_cast<float>(pCreateInfo->height) /
            std::max(static_cast<float>(phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height),
                     1.0f)));
        if (mip_height < ceiling_height) {
            skip |= LogError(device, "VUID-VkFramebufferCreateInfo-pAttachments-02556",
                             "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height (%u) "
                             "smaller than the ceiling of framebuffer height / maxFragmentDensityTexelSize.height (%u).",
                             i, subresource_range.baseMipLevel, mip_height, ceiling_height);
        }
        if (view_state->normalized_subresource_range.layerCount != 1) {
            skip |= LogError(device, "VUID-VkFramebufferCreateInfo-pAttachments-02744",
                             "vkCreateFramebuffer(): pCreateInfo->pAttachments[%" PRIu32 "] is referenced by "
                             "VkRenderPassFragmentDensityMapCreateInfoEXT::fragmentDensityMapAttachment in "
                             "the pNext chain, but it was created with subresourceRange.layerCount (%" PRIu32
                             ") different from 1.",
                             i, view_state->normalized_subresource_range.layerCount);
        }
    }
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
    if (mip_width < pCreateInfo->width) {
        skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04533",
                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has "
                         "width (%u) smaller than the corresponding framebuffer width (%u).",
                         i, mip_level, mip_width, pCreateInfo->width);
    }
    if (mip_height < pCreateInfo->height) {
        skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04534",
                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has "
                         "height (%u) smaller than the corresponding framebuffer height (%u).",
                         i, mip_level, mip_height, pCreateInfo->height);
    }
    uint32_t layer_count = view_state->create_info.subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS
                               ? view_state->image_state->createInfo.arrayLayers
                               : view_state->create_info.subresourceRange.layerCount;
    if (layer_count < pCreateInfo->layers) {
        skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04535",
                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
                         "smaller than the corresponding framebuffer layer count (%u).",
                         i, layer_count, pCreateInfo->layers);
    }
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
    if (subresource_range.layerCount != 1 && subresource_range.layerCount < pCreateInfo->layers) {
        skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04538",
                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
                         "smaller than the corresponding framebuffer layer count (%u).",
                         i, subresource_range.layerCount, pCreateInfo->layers);
    }
}
Here are the actual " "swizzle values:\n" "r swizzle = %s\n" "g swizzle = %s\n" "b swizzle = %s\n" "a swizzle = %s\n", i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g), string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a)); } if ((ivci.viewType == VK_IMAGE_VIEW_TYPE_2D) || (ivci.viewType == VK_IMAGE_VIEW_TYPE_2D)) { const auto image_state = Get<IMAGE_STATE>(ivci.image); if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) { if (FormatIsDepthOrStencil(ivci.format)) { LogObjectList objlist(device); objlist.add(ivci.image); skip |= LogError( objlist, "VUID-VkFramebufferCreateInfo-pAttachments-00891", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type of " "%s " "which was taken from image %s of type VK_IMAGE_TYPE_3D, but the image view format is a " "depth/stencil format %s", i, string_VkImageViewType(ivci.viewType), report_data->FormatHandle(ivci.image).c_str(), string_VkFormat(ivci.format)); } } } if (ivci.viewType == VK_IMAGE_VIEW_TYPE_3D) { LogObjectList objlist(device); objlist.add(image_views[i]); skip |= LogError(objlist, "VUID-VkFramebufferCreateInfo-flags-04113", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type " "of VK_IMAGE_VIEW_TYPE_3D", i); } } } } else if (framebuffer_attachments_create_info) { // VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT is set for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { auto &aii = framebuffer_attachments_create_info->pAttachmentImageInfos[i]; bool format_found = false; for (uint32_t j = 0; j < aii.viewFormatCount; ++j) { if (aii.pViewFormats[j] == rpci->pAttachments[i].format) { format_found = true; } } if (!format_found) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-flags-03205", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include " "format %s used " "by the corresponding attachment for renderPass (%s).", i, string_VkFormat(rpci->pAttachments[i].format), report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } bool used_as_input_color_resolve_depth_stencil_attachment = false; bool used_as_fragment_shading_rate_attachment = false; bool fsr_non_zero_viewmasks = false; for (uint32_t j = 0; j < rpci->subpassCount; ++j) { const VkSubpassDescription2 &subpass = rpci->pSubpasses[j]; uint32_t highest_view_bit = 0; for (int k = 0; k < 32; ++k) { if (((subpass.viewMask >> k) & 1) != 0) { highest_view_bit = k; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) { if (subpass.pInputAttachments[k].attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) { if (subpass.pColorAttachments[k].attachment == i || (subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; } if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment; fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext); if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) { used_as_fragment_shading_rate_attachment = true; if ((aii.width * 
fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04543", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its width (%u) and the " "specified shading rate texel width (%u) are smaller than the corresponding framebuffer " "width (%u).", i, j, aii.width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width); } if ((aii.height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04544", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its " "height (%u) and the " "specified shading rate texel height (%u) are smaller than the corresponding " "framebuffer height (%u).", i, j, aii.height, fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height); } if (highest_view_bit != 0) { fsr_non_zero_viewmasks = true; } if (aii.layerCount != 1 && aii.layerCount <= highest_view_bit) { skip |= LogError( device, kVUIDUndefined, "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "less than or equal to the highest bit in the view mask (%u) of subpass %u.", i, aii.layerCount, highest_view_bit, j); } } } } if (used_as_input_color_resolve_depth_stencil_attachment) { if (aii.width < pCreateInfo->width) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04541", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, " "but framebuffer has a width of #%u.", i, aii.width, pCreateInfo->width); } if (aii.height < pCreateInfo->height) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04542", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, " "but framebuffer has a height of #%u.", i, aii.height, pCreateInfo->height); } const char *mismatched_layers_no_multiview_vuid = IsExtEnabled(device_extensions.vk_khr_multiview) ? 
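                // Which VUID applies to a layer-count mismatch without multiview depends on
                // whether VK_KHR_multiview is enabled on the device.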
"VUID-VkFramebufferCreateInfo-renderPass-04546" : "VUID-VkFramebufferCreateInfo-flags-04547"; if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) { if (aii.layerCount < pCreateInfo->layers) { skip |= LogError( device, mismatched_layers_no_multiview_vuid, "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, " "but framebuffer has #%u layers.", i, aii.layerCount, pCreateInfo->layers); } } } if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) { if (aii.layerCount != 1 && aii.layerCount < pCreateInfo->layers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04545", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "smaller than the corresponding framebuffer layer count (%u).", i, aii.layerCount, pCreateInfo->layers); } } } // Validate image usage uint32_t attachment_index = VK_ATTACHMENT_UNUSED; for (uint32_t i = 0; i < rpci->subpassCount; ++i) { skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201"); skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201"); skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202"); skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204"); const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext); if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve) && depth_stencil_resolve != nullptr) { skip |= MatchUsage(1, depth_stencil_resolve->pDepthStencilResolveAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203"); } const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(rpci->pSubpasses[i].pNext); if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && fragment_shading_rate_attachment_info != nullptr) { skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, "VUID-VkFramebufferCreateInfo-flags-04549"); } } if (IsExtEnabled(device_extensions.vk_khr_multiview)) { if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) { for (uint32_t i = 0; i < rpci->subpassCount; ++i) { const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext); uint32_t view_bits = rpci->pSubpasses[i].viewMask; uint32_t highest_view_bit = 0; for (int j = 0; j < 32; ++j) { if (((view_bits >> j) & 1) != 0) { highest_view_bit = j; } } for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) { attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { 
skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a color attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } if (rpci->pSubpasses[i].pResolveAttachments) { attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a resolve attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } } } for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) { attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as an input attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } } if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) { attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a depth/stencil attachment.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit); } } if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve) && depth_stencil_resolve != nullptr && depth_stencil_resolve->pDepthStencilResolveAttachment != nullptr) { attachment_index = depth_stencil_resolve->pDepthStencilResolveAttachment->attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a depth/stencil resolve 
" "attachment.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit); } } } } } } } } if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { // Verify correct attachment usage flags for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) { const VkSubpassDescription2 &subpass_description = rpci->pSubpasses[subpass]; // Verify input attachments: skip |= MatchUsage(subpass_description.inputAttachmentCount, subpass_description.pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879"); // Verify color attachments: skip |= MatchUsage(subpass_description.colorAttachmentCount, subpass_description.pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877"); // Verify depth/stencil attachments: skip |= MatchUsage(1, subpass_description.pDepthStencilAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633"); // Verify depth/stecnil resolve if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve)) { const VkSubpassDescriptionDepthStencilResolve *ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_description.pNext); if (ds_resolve) { skip |= MatchUsage(1, ds_resolve->pDepthStencilResolveAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02634"); } } // Verify fragment shading rate attachments if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass_description.pNext); if (fragment_shading_rate_attachment_info) { skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, "VUID-VkFramebufferCreateInfo-flags-04548"); } } } } bool b_has_non_zero_view_masks = false; for (uint32_t i = 0; i < rpci->subpassCount; ++i) { if (rpci->pSubpasses[i].viewMask != 0) { b_has_non_zero_view_masks = true; break; } } if (b_has_non_zero_view_masks && pCreateInfo->layers != 1) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-02531", "vkCreateFramebuffer(): VkFramebufferCreateInfo has #%u layers but " "renderPass (%s) was specified with non-zero view masks\n", pCreateInfo->layers, report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } } } // Verify FB dimensions are within physical device limits if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00886", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested " "width: %u, device max: %u\n", pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth); } if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00888", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. 
Requested " "height: %u, device max: %u\n", pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight); } if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00890", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested " "layers: %u, device max: %u\n", pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers); } // Verify FB dimensions are greater than zero if (pCreateInfo->width <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00885", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero."); } if (pCreateInfo->height <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00887", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero."); } if (pCreateInfo->layers <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00889", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero."); } return skip; } bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const { // TODO : Verify that renderPass FB is created with is compatible with FB bool skip = false; skip |= ValidateFramebufferCreateInfo(pCreateInfo); return skip; } static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node, layer_data::unordered_set<uint32_t> &processed_nodes) { // If we have already checked this node we have not found a dependency path so return false. if (processed_nodes.count(index)) return false; processed_nodes.insert(index); const DAGNode &node = subpass_to_node[index]; // Look for a dependency path. If one exists return true else recurse on the previous nodes. if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) { for (auto elem : node.prev) { if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true; } } else { return true; } return false; } bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const { if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) { return true; } return false; } bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout, const std::vector<SubpassLayout> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node, bool &skip) const { bool result = true; bool b_image_layout_read_only = IsImageLayoutReadOnly(layout); // Loop through all subpasses that share the same attachment and make sure a dependency exists for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) { const SubpassLayout &sp = dependent_subpasses[k]; if (subpass == sp.index) continue; if (b_image_layout_read_only && IsImageLayoutReadOnly(sp.layout)) continue; const DAGNode &node = subpass_to_node[subpass]; // Check for a specified dependency between the two nodes. If one exists we are done. 
bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout,
                                       const std::vector<SubpassLayout> &dependent_subpasses,
                                       const std::vector<DAGNode> &subpass_to_node, bool &skip) const {
    bool result = true;
    bool b_image_layout_read_only = IsImageLayoutReadOnly(layout);
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        const SubpassLayout &sp = dependent_subpasses[k];
        if (subpass == sp.index) continue;
        if (b_image_layout_read_only && IsImageLayoutReadOnly(sp.layout)) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index);
        auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If not, throw an error.
            layer_data::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) ||
                  FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) {
                skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
                                 "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                 sp.index);
                result = false;
            }
        }
    }
    return result;
}
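// Recursively walks the subpass DAG backwards from 'index': returns true if this
// subpass (or one of its predecessors) uses the attachment, and reports an error when
// an earlier subpass wrote it but an intermediate subpass neither uses nor preserves it.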
if (result && depth > 0) { bool has_preserved = false; for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { if (subpass.pPreserveAttachments[j] == attachment) { has_preserved = true; break; } } if (!has_preserved) { skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass, "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index); } } return result; } template <class T> bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) { return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) || ((offset1 > offset2) && (offset1 < (offset2 + size2))); } bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) { return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) && IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount)); } bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const { bool skip = false; auto const framebuffer_info = framebuffer->createInfo.ptr(); auto const create_info = renderPass->createInfo.ptr(); auto const &subpass_to_node = renderPass->subpass_to_node; struct Attachment { std::vector<SubpassLayout> outputs; std::vector<SubpassLayout> inputs; std::vector<uint32_t> overlapping; }; std::vector<Attachment> attachments(create_info->attachmentCount); if (!(framebuffer_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) { // Find overlapping attachments for (uint32_t i = 0; i < create_info->attachmentCount; ++i) { for (uint32_t j = i + 1; j < create_info->attachmentCount; ++j) { VkImageView viewi = framebuffer_info->pAttachments[i]; VkImageView viewj = framebuffer_info->pAttachments[j]; if (viewi == viewj) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); continue; } auto *view_state_i = framebuffer->attachments_view_state[i].get(); auto *view_state_j = framebuffer->attachments_view_state[j].get(); if (!view_state_i || !view_state_j) { continue; } auto view_ci_i = view_state_i->create_info; auto view_ci_j = view_state_j->create_info; if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); continue; } auto *image_data_i = view_state_i->image_state.get(); auto *image_data_j = view_state_j->image_state.get(); if (!image_data_i || !image_data_j) { continue; } const auto *binding_i = image_data_i->Binding(); const auto *binding_j = image_data_j->Binding(); if (binding_i && binding_j && binding_i->mem_state == binding_j->mem_state && IsRangeOverlapping(binding_i->offset, binding_i->size, binding_j->offset, binding_j->size)) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); } } } } // Find for each attachment the subpasses that use them. 
layer_data::unordered_set<uint32_t> attachment_indices; for (uint32_t i = 0; i < create_info->subpassCount; ++i) { const VkSubpassDescription2 &subpass = create_info->pSubpasses[i]; attachment_indices.clear(); for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; SubpassLayout sp = {i, subpass.pInputAttachments[j].layout}; attachments[attachment].inputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].inputs.emplace_back(sp); } } for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; SubpassLayout sp = {i, subpass.pColorAttachments[j].layout}; attachments[attachment].outputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].outputs.emplace_back(sp); } attachment_indices.insert(attachment); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { uint32_t attachment = subpass.pDepthStencilAttachment->attachment; SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout}; attachments[attachment].outputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].outputs.emplace_back(sp); } if (attachment_indices.count(attachment)) { skip |= LogError(renderPass->renderPass(), kVUID_Core_DrawState_InvalidRenderpass, "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i); } } } // If there is a dependency needed make sure one exists for (uint32_t i = 0; i < create_info->subpassCount; ++i) { const VkSubpassDescription2 &subpass = create_info->pSubpasses[i]; // If the attachment is an input then all subpasses that output must have a dependency relationship for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(renderPass->renderPass(), i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip); } // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(renderPass->renderPass(), i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip); CheckDependencyExists(renderPass->renderPass(), i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs, subpass_to_node, skip); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; CheckDependencyExists(renderPass->renderPass(), i, subpass.pDepthStencilAttachment->layout, attachments[attachment].outputs, subpass_to_node, skip); CheckDependencyExists(renderPass->renderPass(), i, subpass.pDepthStencilAttachment->layout, attachments[attachment].inputs, subpass_to_node, skip); } } // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it 
    // was written.
    for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
        const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(renderPass->renderPass(), create_info, i, subpass.pInputAttachments[j].attachment, subpass_to_node,
                           0, skip);
        }
    }
    return skip;
}
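// Validates the subpass dependency graph described by the render pass itself,
// independent of any framebuffer: subpass ordering, self-dependency rules,
// multiview view-local constraints, and external dependencies.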
"VUID-VkSubpassDependency2-viewOffset-02530" : "VUID-VkRenderPassCreateInfo-pNext-01930"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i, dependency.viewOffset); } else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags && pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not " "specify VK_DEPENDENCY_VIEW_LOCAL_BIT.", i, dependency.srcSubpass); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) || HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) && (sync_utils::GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) > sync_utils::GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867"; skip |= LogError( device, vuid, "Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).", i, sync_utils::StringPipelineStageFlags(latest_src_stage).c_str(), sync_utils::StringPipelineStageFlags(earliest_dst_stage).c_str()); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) == false) && (HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask) == false) && ((dependency.dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT) == 0)) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-02245" : "VUID-VkSubpassDependency-srcSubpass-02243"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency for subpass %u with both stages including a " "framebuffer-space stage, but does not specify VK_DEPENDENCY_BY_REGION_BIT in dependencyFlags.", i, dependency.srcSubpass); } } else if ((dependency.srcSubpass < dependency.dstSubpass) && ((pCreateInfo->pSubpasses[dependency.srcSubpass].flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0)) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-flags-04909" : "VUID-VkSubpassDescription-flags-03343"; skip |= LogError(device, vuid, "Dependency %u specifies that subpass %u has a dependency on a later subpass" "and includes VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM subpass flags.", i, dependency.srcSubpass); } } return skip; } bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count, const char *error_type, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); assert(attachment != VK_ATTACHMENT_UNUSED); if (attachment >= attachment_count) { const char *vuid = use_rp2 ? 
"VUID-VkRenderPassCreateInfo2-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834"; skip |= LogError(device, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", function_name, error_type, attachment, attachment_count); } return skip; } enum AttachmentType { ATTACHMENT_COLOR = 1, ATTACHMENT_DEPTH = 2, ATTACHMENT_INPUT = 4, ATTACHMENT_PRESERVE = 8, ATTACHMENT_RESOLVE = 16, }; char const *StringAttachmentType(uint8_t type) { switch (type) { case ATTACHMENT_COLOR: return "color"; case ATTACHMENT_DEPTH: return "depth"; case ATTACHMENT_INPUT: return "input"; case ATTACHMENT_PRESERVE: return "preserve"; case ATTACHMENT_RESOLVE: return "resolve"; default: return "(multiple)"; } } bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) const { if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */ bool skip = false; auto &uses = attachment_uses[attachment]; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2()" : "vkCreateRenderPass()"; if (uses & new_use) { if (attachment_layouts[attachment] != new_layout) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-layout-02528" : "VUID-VkSubpassDescription-layout-02519"; skip |= LogError(device, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]), string_VkImageLayout(new_layout)); } } else if (((new_use & ATTACHMENT_COLOR) && (uses & ATTACHMENT_DEPTH)) || ((uses & ATTACHMENT_COLOR) && (new_use & ATTACHMENT_DEPTH))) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-04440" : "VUID-VkSubpassDescription-pDepthStencilAttachment-04438"; skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment, StringAttachmentType(uses), StringAttachmentType(new_use)); } else if ((uses && (new_use & ATTACHMENT_PRESERVE)) || (new_use && (uses & ATTACHMENT_PRESERVE))) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-pPreserveAttachments-03074" : "VUID-VkSubpassDescription-pPreserveAttachments-00854"; skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment, StringAttachmentType(uses), StringAttachmentType(new_use)); } else { attachment_layouts[attachment] = new_layout; uses |= new_use; } return skip; } // Handles attachment references regardless of type (input, color, depth, etc) // Input attachments have extra VUs associated with them bool CoreChecks::ValidateAttachmentReference(RenderPassCreateVersion rp_version, VkAttachmentReference2 reference, const VkFormat attachment_format, bool input, const char *error_type, const char *function_name) const { bool skip = false; // Currently all VUs require attachment to not be UNUSED assert(reference.attachment != VK_ATTACHMENT_UNUSED); // currently VkAttachmentReference and VkAttachmentReference2 have no overlapping VUs if (rp_version == RENDER_PASS_VERSION_1) { switch (reference.layout) { case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: skip |= LogError(device, "VUID-VkAttachmentReference-layout-00857", "%s: Layout for %s is %s but must not be " "VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR|DEPTH_ATTACHMENT_OPTIMAL|DEPTH_READ_" "ONLY_OPTIMAL|STENCIL_ATTACHMENT_OPTIMAL|STENCIL_READ_ONLY_OPTIMAL].", function_name, error_type, string_VkImageLayout(reference.layout)); break; default: break; } } else { const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(reference.pNext); switch (reference.layout) { case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: skip |= LogError(device, "VUID-VkAttachmentReference2-layout-03077", "%s: Layout for %s is %s but must not be VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR].", function_name, error_type, string_VkImageLayout(reference.layout)); break; // Only other layouts in VUs to be checked case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: // First need to make sure feature bit is enabled and the format is actually a depth and/or stencil if (!enabled_features.core12.separateDepthStencilLayouts) { skip |= LogError(device, "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313", "%s: Layout for %s is %s but without separateDepthStencilLayouts enabled the layout must not " "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout)); } else if (!FormatIsDepthOrStencil(attachment_format)) { // using this over FormatIsColor() incase a multiplane and/or undef would sneak in // "color" format is still an ambiguous term in spec (internal issue #2484) skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04754", "%s: Layout for %s is %s but the attachment is a not a depth/stencil format (%s) so the layout must not " "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " 
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } else { if ((reference.layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL) || (reference.layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL)) { if (FormatIsDepthOnly(attachment_format)) { skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04756", "%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not " "be VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } } else { // DEPTH_ATTACHMENT_OPTIMAL || DEPTH_READ_ONLY_OPTIMAL if (FormatIsStencilOnly(attachment_format)) { skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04757", "%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not " "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } if (attachment_reference_stencil_layout) { // This check doesn't rely on the aspect mask value const VkImageLayout stencil_layout = attachment_reference_stencil_layout->stencilLayout; // clang-format off if (stencil_layout == VK_IMAGE_LAYOUT_UNDEFINED || stencil_layout == VK_IMAGE_LAYOUT_PREINITIALIZED || stencil_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { skip |= LogError(device, "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318", "%s: In %s with pNext chain instance VkAttachmentReferenceStencilLayout, " "the stencilLayout (%s) must not be " "VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PREINITIALIZED, " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, or " "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.", function_name, error_type, string_VkImageLayout(stencil_layout)); } // clang-format on } else if (FormatIsDepthAndStencil(attachment_format)) { skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04755", "%s: Layout for %s is %s but the attachment is a depth and stencil format (%s) so if the layout is " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL there needs " "to be a VkAttachmentReferenceStencilLayout in the pNext chain to set the seperate stencil layout " "because the separateDepthStencilLayouts feature is enabled.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } } } break; default: break; } } return skip; } bool 
bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo,
                                                   const char *function_name) const {
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkFormat format = pCreateInfo->pAttachments[i].format;
        if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            if ((FormatIsColor(format) || FormatHasDepth(format)) &&
                pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
                                   "%s: Render pass pAttachments[%u] has loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and "
                                   "initialLayout == VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. "
                                   "Consider using VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truly is undefined at "
                                   "the start of the render pass.",
                                   function_name, i);
            }
            if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
                                   "%s: Render pass pAttachments[%u] has stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and "
                                   "initialLayout == VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. "
                                   "Consider using VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truly is undefined at "
                                   "the start of the render pass.",
                                   function_name, i);
            }
        }
    }

    // Track when we're observing the first use of an attachment
    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
        std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
        std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);

        // Track if attachments are used as input as well as another type
        layer_data::unordered_set<uint32_t> input_attachments;

        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS &&
            subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI) {
            vuid = use_rp2 ? "VUID-VkSubpassDescription2-pipelineBindPoint-04953"
                           : "VUID-VkSubpassDescription-pipelineBindPoint-04952";
            skip |= LogError(device, vuid,
                             "%s: Pipeline bind point for pSubpasses[%d] must be VK_PIPELINE_BIND_POINT_GRAPHICS or "
                             "VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI.",
                             function_name, i);
        }

        // Check input attachments first
        // - so we can detect first-use-as-input for VU #00349
        // - if other color or depth/stencil is also input, it limits valid layouts
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto const &attachment_ref = subpass.pInputAttachments[j];
            const uint32_t attachment_index = attachment_ref.attachment;
            const VkImageAspectFlags aspect_mask = attachment_ref.aspectMask;
            if (attachment_index != VK_ATTACHMENT_UNUSED) {
                input_attachments.insert(attachment_index);
                std::string error_type = "pSubpasses[" + std::to_string(i) + "].pInputAttachments[" + std::to_string(j) + "]";
                skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
                                                function_name);

                if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
                    vuid = use_rp2 ?
"VUID-VkSubpassDescription2-attachment-02801" : "VUID-VkInputAttachmentAspectReference-aspectMask-01964"; skip |= LogError( device, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.", function_name, j, i); } else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-04563" : "VUID-VkInputAttachmentAspectReference-aspectMask-02250"; skip |= LogError(device, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes " "VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit.", function_name, j, i); } // safe to dereference pCreateInfo->pAttachments[] if (attachment_index < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format; skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, true, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_INPUT, attachment_ref.layout); vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-02525" : "VUID-VkRenderPassCreateInfo-pNext-01963"; skip |= ValidateImageAspectMask(VK_NULL_HANDLE, attachment_format, aspect_mask, function_name, vuid); if (attach_first_use[attachment_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout, attachment_index, pCreateInfo->pAttachments[attachment_index]); bool used_as_depth = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment_index); bool used_as_color = false; for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) { used_as_color = (subpass.pColorAttachments[k].attachment == attachment_index); } if (!used_as_depth && !used_as_color && pCreateInfo->pAttachments[attachment_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846"; skip |= LogError(device, vuid, "%s: attachment %u is first used as an input attachment in %s with loadOp set to " "VK_ATTACHMENT_LOAD_OP_CLEAR.", function_name, attachment_index, error_type.c_str()); } } attach_first_use[attachment_index] = false; const VkFormatFeatureFlags2KHR valid_flags = VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR | VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR; const VkFormatFeatureFlags2KHR format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & valid_flags) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pInputAttachments-02897" : "VUID-VkSubpassDescription-pInputAttachments-02647"; skip |= LogError(device, vuid, "%s: Input attachment %s format (%s) does not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT " "| VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } } if (rp_version == RENDER_PASS_VERSION_2) { // These are validated automatically as part of parameter validation for create renderpass 1 // as they are in a struct that only applies to input attachments - not so for v2. 
                    // Check for 0
                    if (aspect_mask == 0) {
                        skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02800",
                                         "%s: Input attachment %s aspect mask must not be 0.", function_name, error_type.c_str());
                    } else {
                        const VkImageAspectFlags valid_bits =
                            (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
                             VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
                             VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
                             VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
                             VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT);

                        // Check for valid aspect mask bits
                        if (aspect_mask & ~valid_bits) {
                            skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02799",
                                             "%s: Input attachment %s aspect mask (0x%" PRIx32 ") is invalid.", function_name,
                                             error_type.c_str(), aspect_mask);
                        }
                    }
                }

                // Validate layout
                vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
                switch (attachment_ref.layout) {
                    case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
                    case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_GENERAL:
                    case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR:
                    case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
                        break;  // valid layouts
                    default:
                        skip |= LogError(device, vuid,
                                         "%s: %s layout is %s but input attachments must be "
                                         "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
                                         "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, or "
                                         "VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR",
                                         function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
                        break;
                }
            }
        }
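        // Preserve attachments only keep an attachment's contents alive across a subpass
        // that does not otherwise use it; they carry no image layout and may not be
        // VK_ATTACHMENT_UNUSED.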
"VUID-VkSubpassDescription2-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853"; skip |= LogError(device, vuid, "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j); } else { skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE, VkImageLayout(0) /* preserve doesn't have any layout */); } } } bool subpass_performs_resolve = false; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { if (subpass.pResolveAttachments) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pResolveAttachments[" + std::to_string(j) + "]"; auto const &attachment_ref = subpass.pResolveAttachments[j]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); // safe to dereference pCreateInfo->pAttachments[] if (attachment_ref.attachment < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_ref.attachment].format; skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment, ATTACHMENT_RESOLVE, attachment_ref.layout); subpass_performs_resolve = true; if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03067" : "VUID-VkSubpassDescription-pResolveAttachments-00849"; skip |= LogError( device, vuid, "%s: Subpass %u requests multisample resolve into attachment %u, which must " "have VK_SAMPLE_COUNT_1_BIT but has %s.", function_name, i, attachment_ref.attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples)); } const VkFormatFeatureFlags2KHR format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-02899" : "VUID-VkSubpassDescription-pResolveAttachments-02649"; skip |= LogError(device, vuid, "%s: Resolve attachment %s format (%s) does not contain " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } // VK_QCOM_render_pass_shader_resolve check of resolve attachmnents if ((subpass.flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0) { vuid = use_rp2 ? 
"VUID-VkRenderPassCreateInfo2-flags-04907" : "VUID-VkSubpassDescription-flags-03341"; skip |= LogError( device, vuid, "%s: Subpass %u enables shader resolve, which requires every element of pResolve attachments" " must be VK_ATTACHMENT_UNUSED, but element %u contains a reference to attachment %u instead.", function_name, i, j, attachment_ref.attachment); } } } } } if (subpass.pDepthStencilAttachment) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pDepthStencilAttachment"; const uint32_t attachment = subpass.pDepthStencilAttachment->attachment; const VkImageLayout image_layout = subpass.pDepthStencilAttachment->layout; if (attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); // safe to dereference pCreateInfo->pAttachments[] if (attachment < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment].format; skip |= ValidateAttachmentReference(rp_version, *subpass.pDepthStencilAttachment, attachment_format, false, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_DEPTH, image_layout); if (attach_first_use[attachment]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, image_layout, attachment, pCreateInfo->pAttachments[attachment]); } attach_first_use[attachment] = false; const VkFormatFeatureFlags2KHR format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-02900" : "VUID-VkSubpassDescription-pDepthStencilAttachment-02650"; skip |= LogError(device, vuid, "%s: Depth Stencil %s format (%s) does not contain " "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } } // Check for valid imageLayout vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (image_layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_GENERAL: break; // valid layouts case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR: case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR: if (input_attachments.find(attachment) != input_attachments.end()) { skip |= LogError( device, vuid, "%s: %s is also an input attachment so the layout (%s) must not be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR " "or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(image_layout)); } break; default: skip |= LogError(device, vuid, "%s: %s layout is %s but depth/stencil attachments must be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_GENERAL, " "VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR or" "VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(image_layout)); break; } } } uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pColorAttachments[" + std::to_string(j) + "]"; auto const &attachment_ref = subpass.pColorAttachments[j]; const uint32_t attachment_index = attachment_ref.attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(), function_name); // safe to dereference pCreateInfo->pAttachments[] if (attachment_index < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format; skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_COLOR, attachment_ref.layout); VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_index].samples; if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) { VkSampleCountFlagBits last_sample_count = pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples; if (current_sample_count != last_sample_count) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-pColorAttachments-03069" : "VUID-VkSubpassDescription-pColorAttachments-01417"; skip |= LogError( device, vuid, "%s: Subpass %u attempts to render to color attachments with inconsistent sample counts." "Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(current_sample_count), last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count)); } } last_sample_count_attachment = j; if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03066" : "VUID-VkSubpassDescription-pResolveAttachments-00848"; skip |= LogError(device, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "VK_SAMPLE_COUNT_1_BIT.", function_name, i, attachment_index); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) { const auto depth_stencil_sample_count = pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples; if (IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples)) { if (current_sample_count > depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03070" : "VUID-VkSubpassDescription-pColorAttachments-01506"; skip |= LogError(device, vuid, "%s: %s has %s which is larger than depth/stencil attachment %s.", function_name, error_type.c_str(), string_VkSampleCountFlagBits(current_sample_count), string_VkSampleCountFlagBits(depth_stencil_sample_count)); break; } } if (!IsExtEnabled(device_extensions.vk_amd_mixed_attachment_samples) && !IsExtEnabled(device_extensions.vk_nv_framebuffer_mixed_samples) && current_sample_count != depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071" : "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"; skip |= LogError(device, vuid, "%s: Subpass %u attempts to render to use a depth/stencil attachment with sample " "count that differs " "from color attachment %u." "The depth attachment ref has sample count %s, whereas color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j, string_VkSampleCountFlagBits(current_sample_count)); break; } } const VkFormatFeatureFlags2KHR format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-02898" : "VUID-VkSubpassDescription-pColorAttachments-02648"; skip |= LogError(device, vuid, "%s: Color attachment %s format (%s) does not contain " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } if (attach_first_use[attachment_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout, attachment_index, pCreateInfo->pAttachments[attachment_index]); } attach_first_use[attachment_index] = false; } // Check for valid imageLayout vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (attachment_ref.layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_GENERAL: break; // valid layouts case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR: if (input_attachments.find(attachment_index) != input_attachments.end()) { skip |= LogError(device, vuid, "%s: %s is also an input attachment so the layout (%s) must not be " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); } break; default: skip |= LogError(device, vuid, "%s: %s layout is %s but color attachments must be " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR, " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or " "VK_IMAGE_LAYOUT_GENERAL.", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); break; } } if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED && subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) { if (attachment_index == VK_ATTACHMENT_UNUSED) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03065" : "VUID-VkSubpassDescription-pResolveAttachments-00847"; skip |= LogError(device, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "attachment=VK_ATTACHMENT_UNUSED.", function_name, i, attachment_index); } else { const auto &color_desc = pCreateInfo->pAttachments[attachment_index]; const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment]; if (color_desc.format != resolve_desc.format) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03068" : "VUID-VkSubpassDescription-pResolveAttachments-00850"; skip |= LogError(device, vuid, "%s: %s resolves to an attachment with a " "different format. color format: %u, resolve format: %u.", function_name, error_type.c_str(), color_desc.format, resolve_desc.format); } } } } } return skip; } bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo, function_name); skip |= ValidateRenderPassDAG(rp_version, pCreateInfo); // Validate multiview correlation and view masks bool view_mask_zero = false; bool view_mask_non_zero = false; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i]; if (subpass.viewMask != 0) { view_mask_non_zero = true; } else { view_mask_zero = true; } if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 && (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-flags-03076" : "VUID-VkSubpassDescription-flags-00856"; skip |= LogError(device, vuid, "%s: The flags parameter of subpass description %u includes " "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include " "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.", function_name, i); } } if (rp_version == RENDER_PASS_VERSION_2) { if (view_mask_non_zero && view_mask_zero) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03058", "%s: Some view masks are non-zero whilst others are zero.", function_name); } if (view_mask_zero && pCreateInfo->correlatedViewMaskCount != 0) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03057", "%s: Multiview is not enabled but correlation masks are still provided", function_name); } } uint32_t aggregated_cvms = 0; for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) { if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056" : "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841"; skip |= LogError(device, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i); } aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i]; } LogObjectList objects(device); auto func_name = use_rp2 ? Func::vkCreateRenderPass2 : Func::vkCreateRenderPass; auto structure = use_rp2 ? Struct::VkSubpassDependency2 : Struct::VkSubpassDependency; for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { auto const &dependency = pCreateInfo->pDependencies[i]; Location loc(func_name, structure, Field::pDependencies, i); skip |= ValidateSubpassDependency(objects, loc, dependency); } return skip; } bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { bool skip = false; // Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds) const VkRenderPassMultiviewCreateInfo *multiview_info = LvlFindInChain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext); if (multiview_info) { if (multiview_info->subpassCount && multiview_info->subpassCount != pCreateInfo->subpassCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01928", "vkCreateRenderPass(): Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount, multiview_info->subpassCount); } else if (multiview_info->dependencyCount && multiview_info->dependencyCount != pCreateInfo->dependencyCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01929", "vkCreateRenderPass(): Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount, multiview_info->dependencyCount); } bool all_zero = true; bool all_not_zero = true; for (uint32_t i = 0; i < multiview_info->subpassCount; ++i) { all_zero &= multiview_info->pViewMasks[i] == 0; all_not_zero &= !(multiview_info->pViewMasks[i] == 0); } if (!all_zero && !all_not_zero) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo-pNext-02513", "vkCreateRenderPass(): elements of VkRenderPassMultiviewCreateInfo pViewMasks must all be either 0 or not 0."); } } const VkRenderPassInputAttachmentAspectCreateInfo *input_attachment_aspect_info = LvlFindInChain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext); if (input_attachment_aspect_info) { for (uint32_t i = 0; i < 
input_attachment_aspect_info->aspectReferenceCount; ++i) { uint32_t subpass = input_attachment_aspect_info->pAspectReferences[i].subpass; uint32_t attachment = input_attachment_aspect_info->pAspectReferences[i].inputAttachmentIndex; if (subpass >= pCreateInfo->subpassCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01926", "vkCreateRenderPass(): Subpass index %u specified by input attachment aspect info %u is greater " "than the subpass " "count of %u for this render pass.", subpass, i, pCreateInfo->subpassCount); } else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01927", "vkCreateRenderPass(): Input attachment index %u specified by input attachment aspect info %u is " "greater than the " "input attachment count of %u for this subpass.", attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount); } } } // TODO - VK_EXT_fragment_density_map should be moved into generic ValidateCreateRenderPass() and given RP2 VUIDs const VkRenderPassFragmentDensityMapCreateInfoEXT *fragment_density_map_info = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext); if (fragment_density_map_info) { if (fragment_density_map_info->fragmentDensityMapAttachment.attachment != VK_ATTACHMENT_UNUSED) { if (fragment_density_map_info->fragmentDensityMapAttachment.attachment >= pCreateInfo->attachmentCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-fragmentDensityMapAttachment-06471", "vkCreateRenderPass(): fragmentDensityMapAttachment %u must be less than attachmentCount %u " "for this render pass.", fragment_density_map_info->fragmentDensityMapAttachment.attachment, pCreateInfo->attachmentCount); } else { if (!(fragment_density_map_info->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT || fragment_density_map_info->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_GENERAL)) { skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549", "vkCreateRenderPass(): Layout of fragmentDensityMapAttachment %u must be equal to " "VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT or VK_IMAGE_LAYOUT_GENERAL.", fragment_density_map_info->fragmentDensityMapAttachment.attachment); } if (!(pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD || pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp == VK_ATTACHMENT_LOAD_OP_DONT_CARE)) { skip |= LogError( device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550", "vkCreateRenderPass(): FragmentDensityMapAttachment %u must reference an attachment with a loadOp " "equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.", fragment_density_map_info->fragmentDensityMapAttachment.attachment); } if (pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].storeOp != VK_ATTACHMENT_STORE_OP_DONT_CARE) { skip |= LogError( device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551", "vkCreateRenderPass(): FragmentDensityMapAttachment %u must reference an attachment with a storeOp " "equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.", fragment_density_map_info->fragmentDensityMapAttachment.attachment); } } } } if (!skip) { safe_VkRenderPassCreateInfo2 create_info_2; 
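/* The vkCreateRenderPass input is converted to the VkRenderPassCreateInfo2 representation so the version-generic validation below only has to handle a single structure type. */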
ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2); skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr(), "vkCreateRenderPass()"); } return skip; } // VK_KHR_depth_stencil_resolve was added with a requirement on VK_KHR_create_renderpass2 so this will never be able to use // VkRenderPassCreateInfo bool CoreChecks::ValidateDepthStencilResolve(const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const { bool skip = false; // If the pNext list of VkSubpassDescription2 includes a VkSubpassDescriptionDepthStencilResolve structure, // then that structure describes depth/stencil resolve operations for the subpass. for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) { const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i]; const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass.pNext); // All of the VUs are wrapped in the wording: // "If pDepthStencilResolveAttachment is not NULL" if (resolve == nullptr || resolve->pDepthStencilResolveAttachment == nullptr) { continue; } // The spec says // "If pDepthStencilAttachment is NULL, or if its attachment index is VK_ATTACHMENT_UNUSED, it indicates that no // depth/stencil attachment will be used in the subpass." if (subpass.pDepthStencilAttachment == nullptr) { continue; } else if (subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) { // while it should be ignored, this is an explicit VU and some drivers will crash if this is let through skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03177", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %" PRIu32 ", but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.", function_name, i, resolve->pDepthStencilResolveAttachment->attachment); continue; } const uint32_t ds_attachment = subpass.pDepthStencilAttachment->attachment; const uint32_t resolve_attachment = resolve->pDepthStencilResolveAttachment->attachment; // ValidateAttachmentIndex() should catch if this is invalid, but skip to avoid crashing if (ds_attachment >= pCreateInfo->attachmentCount) { continue; } // All VUs in VkSubpassDescriptionDepthStencilResolve are wrapped with language saying it is not unused if (resolve_attachment == VK_ATTACHMENT_UNUSED) { continue; } if (resolve_attachment >= pCreateInfo->attachmentCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pSubpasses-06473", "%s: pDepthStencilResolveAttachment %" PRIu32 " must be less than attachmentCount %" PRIu32 " for this render pass.", function_name, resolve_attachment, pCreateInfo->attachmentCount); // if the index is invalid, skip everything else to prevent out-of-bounds index accesses from crashing continue; } const VkFormat ds_attachment_format = pCreateInfo->pAttachments[ds_attachment].format; const VkFormat resolve_attachment_format = pCreateInfo->pAttachments[resolve_attachment].format; // "depthResolveMode is ignored if the VkFormat of the pDepthStencilResolveAttachment does not have a depth component" bool resolve_has_depth = FormatHasDepth(resolve_attachment_format); // "stencilResolveMode is ignored if the VkFormat of the pDepthStencilResolveAttachment does not have a stencil component" bool resolve_has_stencil = FormatHasStencil(resolve_attachment_format); if (resolve_has_depth) { if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE || resolve->depthResolveMode & phys_dev_props_core12.supportedDepthResolveModes)) { skip |= 
LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-depthResolveMode-03183", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with invalid depthResolveMode (%s); it must be VK_RESOLVE_MODE_NONE or a value from " "supportedDepthResolveModes (%s).", function_name, i, string_VkResolveModeFlagBits(resolve->depthResolveMode), string_VkResolveModeFlags(phys_dev_props_core12.supportedDepthResolveModes).c_str()); } } if (resolve_has_stencil) { if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE || resolve->stencilResolveMode & phys_dev_props_core12.supportedStencilResolveModes)) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-stencilResolveMode-03184", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with invalid stencilResolveMode (%s); it must be VK_RESOLVE_MODE_NONE or a value from " "supportedStencilResolveModes (%s).", function_name, i, string_VkResolveModeFlagBits(resolve->stencilResolveMode), string_VkResolveModeFlags(phys_dev_props_core12.supportedStencilResolveModes).c_str()); } } if (resolve_has_depth && resolve_has_stencil) { if (phys_dev_props_core12.independentResolve == VK_FALSE && phys_dev_props_core12.independentResolveNone == VK_FALSE && !(resolve->depthResolveMode == resolve->stencilResolveMode)) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03185", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure. The values of depthResolveMode (%s) and stencilResolveMode (%s) must be identical.", function_name, i, string_VkResolveModeFlagBits(resolve->depthResolveMode), string_VkResolveModeFlagBits(resolve->stencilResolveMode)); } if (phys_dev_props_core12.independentResolve == VK_FALSE && phys_dev_props_core12.independentResolveNone == VK_TRUE && !(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE || resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE)) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03186", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure. The values of depthResolveMode (%s) and stencilResolveMode (%s) must be identical, or " "one of them must be VK_RESOLVE_MODE_NONE.", function_name, i, string_VkResolveModeFlagBits(resolve->depthResolveMode), string_VkResolveModeFlagBits(resolve->stencilResolveMode)); } } // Same VU, but better error message if one of the resolves is ignored if (resolve_has_depth && !resolve_has_stencil && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve structure with resolve " "attachment %" PRIu32 ", but the depth resolve mode is VK_RESOLVE_MODE_NONE (stencil resolve mode is " "ignored due to format not having stencil component).", function_name, i, resolve_attachment); } else if (!resolve_has_depth && resolve_has_stencil && resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve structure with resolve " "attachment %" PRIu32 ", but the stencil resolve mode is VK_RESOLVE_MODE_NONE (depth resolve mode is " "ignored due to format not having depth component).", function_name, i, resolve_attachment); } else if (resolve_has_depth && resolve_has_stencil && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE && resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve structure with resolve " "attachment %" PRIu32 ", but both depth and stencil resolve modes are VK_RESOLVE_MODE_NONE.", function_name, i, resolve_attachment); } const uint32_t resolve_depth_size = FormatDepthSize(resolve_attachment_format); const uint32_t resolve_stencil_size = FormatStencilSize(resolve_attachment_format); if (resolve_depth_size > 0 && ((FormatDepthSize(ds_attachment_format) != resolve_depth_size) || (FormatDepthNumericalType(ds_attachment_format) != FormatDepthNumericalType(resolve_attachment_format)))) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03181", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %" PRIu32 " which has a depth component (size %" PRIu32 "). The depth component " "of pDepthStencilAttachment must have the same number of bits (currently %" PRIu32 ") and the same numerical type.", function_name, i, resolve_attachment, resolve_depth_size, FormatDepthSize(ds_attachment_format)); } if (resolve_stencil_size > 0 && ((FormatStencilSize(ds_attachment_format) != resolve_stencil_size) || (FormatStencilNumericalType(ds_attachment_format) != FormatStencilNumericalType(resolve_attachment_format)))) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03182", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %" PRIu32 " which has a stencil component (size %" PRIu32 "). The stencil component " "of pDepthStencilAttachment must have the same number of bits (currently %" PRIu32 ") and the same numerical type.", function_name, i, resolve_attachment, resolve_stencil_size, FormatStencilSize(ds_attachment_format)); } if (pCreateInfo->pAttachments[ds_attachment].samples == VK_SAMPLE_COUNT_1_BIT) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03179", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %" PRIu32 ". However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.", function_name, i, resolve_attachment); } if (pCreateInfo->pAttachments[resolve_attachment].samples != VK_SAMPLE_COUNT_1_BIT) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03180", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %" PRIu32 " which has sample count=VK_SAMPLE_COUNT_1_BIT.", function_name, i, resolve_attachment); } const VkFormatFeatureFlags2KHR potential_format_features = GetPotentialFormatFeatures(resolve_attachment_format); if ((potential_format_features & VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR) == 0) { skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-02651", "%s: Subpass %" PRIu32 " includes a VkSubpassDescriptionDepthStencilResolve " "structure with resolve attachment %" PRIu32 " with a format (%s) whose features do not contain VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", function_name, i, resolve_attachment, string_VkFormat(resolve_attachment_format)); } // VK_QCOM_render_pass_shader_resolve check of depth/stencil attachment if ((subpass.flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-flags-04908", "%s: Subpass %" PRIu32 " enables shader resolve, which requires that the depth/stencil resolve attachment" " be VK_ATTACHMENT_UNUSED, but a reference to attachment %" PRIu32 " was found instead.", function_name, i, resolve_attachment); } } return skip; } bool CoreChecks::ValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass, const char *function_name) const { bool skip = false; if (IsExtEnabled(device_extensions.vk_khr_depth_stencil_resolve)) { skip |= ValidateDepthStencilResolve(pCreateInfo, function_name); } skip |= ValidateFragmentShadingRateAttachments(device, pCreateInfo); safe_VkRenderPassCreateInfo2 create_info_2(pCreateInfo); skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr(), function_name); return skip; } bool CoreChecks::ValidateFragmentShadingRateAttachments(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo) const { bool skip = false; if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { for (uint32_t attachment_description = 0; attachment_description < pCreateInfo->attachmentCount; ++attachment_description) { std::vector<uint32_t> used_as_fragment_shading_rate_attachment; // Prepass to find any uses as a fragment shading rate attachment and validate them independently for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) { const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment = 
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(pCreateInfo->pSubpasses[subpass].pNext); if (fragment_shading_rate_attachment && fragment_shading_rate_attachment->pFragmentShadingRateAttachment) { const VkAttachmentReference2 &attachment_reference = *(fragment_shading_rate_attachment->pFragmentShadingRateAttachment); if (attachment_reference.attachment == attachment_description) { used_as_fragment_shading_rate_attachment.push_back(subpass); } if (((pCreateInfo->flags & VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM) != 0) && (attachment_reference.attachment != VK_ATTACHMENT_UNUSED)) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-flags-04521", "vkCreateRenderPass2: Render pass includes VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM but " "a fragment shading rate attachment is specified in subpass %u.", subpass); } if (attachment_reference.attachment != VK_ATTACHMENT_UNUSED) { const VkFormatFeatureFlags2KHR potential_format_features = GetPotentialFormatFeatures(pCreateInfo->pAttachments[attachment_reference.attachment].format); if (!(potential_format_features & VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-04586", "vkCreateRenderPass2: Attachment description %u is used in subpass %u as a fragment " "shading rate attachment, but specifies format %s, which does not support " "VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.", attachment_reference.attachment, subpass, string_VkFormat(pCreateInfo->pAttachments[attachment_reference.attachment].format)); } if (attachment_reference.layout != VK_IMAGE_LAYOUT_GENERAL && attachment_reference.layout != VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04524", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u specifies a layout of %s, but it " "must be VK_IMAGE_LAYOUT_GENERAL or VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR.", subpass, string_VkImageLayout(attachment_reference.layout)); } if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width)) { skip |= LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04525", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a " "non-power-of-two texel width of %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width < phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04526", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which " "is lower than the advertised minimum width %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04527", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which " "is higher than the advertised maximum width %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, 
phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width); } if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height)) { skip |= LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04528", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a " "non-power-of-two texel height of %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height < phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04529", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u " "which is lower than the advertised minimum height %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04530", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u " "which is higher than the advertised maximum height %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height); } uint32_t aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width / fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height; uint32_t inverse_aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height / fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width; if (aspect_ratio > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04531", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, " "which has an aspect ratio %u, which is higher than the advertised maximum aspect ratio %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, aspect_ratio, phys_dev_ext_props.fragment_shading_rate_props .maxFragmentShadingRateAttachmentTexelSizeAspectRatio); } if (inverse_aspect_ratio > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04532", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, " "which has an inverse aspect ratio of %u, which is higher than the advertised maximum aspect ratio " "%u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, inverse_aspect_ratio, phys_dev_ext_props.fragment_shading_rate_props .maxFragmentShadingRateAttachmentTexelSizeAspectRatio); } } } } // Lambda function 
turning a vector of integers into a string auto vector_to_string = [](const std::vector<uint32_t> &vector) { std::stringstream ss; size_t size = vector.size(); for (size_t i = 0; i < size; i++) { if (size == 2 && i == 1) { ss << " and "; } else if (size > 2 && i == size - 2) { ss << ", and "; } else if (i != 0) { ss << ", "; } ss << vector[i]; } return ss.str(); }; // Search for other uses of the same attachment if (!used_as_fragment_shading_rate_attachment.empty()) { for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) { const VkSubpassDescription2 &subpass_info = pCreateInfo->pSubpasses[subpass]; const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve_attachment = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_info.pNext); std::string fsr_attachment_subpasses_string = vector_to_string(used_as_fragment_shading_rate_attachment); for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) { if (subpass_info.pColorAttachments[attachment].attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as color attachment %u in subpass %u.", attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass); } } for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) { if (subpass_info.pResolveAttachments && subpass_info.pResolveAttachments[attachment].attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as color resolve attachment %u in subpass %u.", attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass); } } for (uint32_t attachment = 0; attachment < subpass_info.inputAttachmentCount; ++attachment) { if (subpass_info.pInputAttachments[attachment].attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as input attachment %u in subpass %u.", attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass); } } if (subpass_info.pDepthStencilAttachment) { if (subpass_info.pDepthStencilAttachment->attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as the depth/stencil attachment in subpass %u.", attachment_description, fsr_attachment_subpasses_string.c_str(), subpass); } } if (depth_stencil_resolve_attachment && depth_stencil_resolve_attachment->pDepthStencilResolveAttachment) { if (depth_stencil_resolve_attachment->pDepthStencilResolveAttachment->attachment == attachment_description) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585", "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in " "subpass(es) %s but also as the depth/stencil resolve attachment in subpass %u.", attachment_description, fsr_attachment_subpasses_string.c_str(), subpass); } } } } } } return skip; } bool 
CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2KHR()"); } bool CoreChecks::PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2()"); } bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const { bool skip = false; if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { skip |= LogError(pCB->commandBuffer(), error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name); } return skip; } bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin, const char *func_name) const { bool skip = false; bool device_group = false; uint32_t device_group_area_count = 0; const VkDeviceGroupRenderPassBeginInfo *device_group_render_pass_begin_info = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext); if (IsExtEnabled(device_extensions.vk_khr_device_group)) { device_group = true; if (device_group_render_pass_begin_info) { device_group_area_count = device_group_render_pass_begin_info->deviceRenderAreaCount; } } auto framebuffer_state = Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer); const auto *framebuffer_info = &framebuffer_state->createInfo; if (device_group && device_group_area_count > 0) { for (uint32_t i = 0; i < device_group_render_pass_begin_info->deviceRenderAreaCount; ++i) { const auto &deviceRenderArea = device_group_render_pass_begin_info->pDeviceRenderAreas[i]; if (deviceRenderArea.offset.x < 0) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-offset-06166", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, " "VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "].offset.x is negative (%" PRIi32 ").", func_name, i, deviceRenderArea.offset.x); } if (deviceRenderArea.offset.y < 0) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-offset-06167", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, " "VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "].offset.y is negative (%" PRIi32 ").", func_name, i, deviceRenderArea.offset.y); } if ((deviceRenderArea.offset.x + deviceRenderArea.extent.width) > framebuffer_info->width) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02856", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, " "VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "] offset.x (%" PRIi32 ") + extent.width (%" PRIu32 ") is greater than framebuffer width (%" PRIu32 ").", func_name, i, deviceRenderArea.offset.x, deviceRenderArea.extent.width, framebuffer_info->width); } if ((deviceRenderArea.offset.y + deviceRenderArea.extent.height) > framebuffer_info->height) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02857", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, " "VkDeviceGroupRenderPassBeginInfo::pDeviceRenderAreas[%" PRIu32 "] offset.y (%" PRIi32 ") + extent.height (%" PRIu32 ") is greater than framebuffer height (%" PRIu32 ").", func_name, i, deviceRenderArea.offset.y, deviceRenderArea.extent.height, framebuffer_info->height); } } } else { if (pRenderPassBegin->renderArea.offset.x < 0) { if (device_group) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02850", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer and pNext " "of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its " "deviceRenderAreaCount is 0, renderArea.offset.x is negative (%" PRIi32 ").", func_name, pRenderPassBegin->renderArea.offset.x); } else { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02846", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, " "renderArea.offset.x is negative (%" PRIi32 ").", func_name, pRenderPassBegin->renderArea.offset.x); } } if (pRenderPassBegin->renderArea.offset.y < 0) { if (device_group) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02851", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer and pNext " "of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its " "deviceRenderAreaCount is 0, renderArea.offset.y is negative (%" PRIi32 ").", func_name, pRenderPassBegin->renderArea.offset.y); } else { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02847", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, " "renderArea.offset.y is negative (%" PRIi32 ").", func_name, pRenderPassBegin->renderArea.offset.y); } } if ((pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > framebuffer_info->width) { if (device_group) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02852", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer and pNext " "of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its " "deviceRenderAreaCount is 0, renderArea.offset.x (%" PRIi32 ") + renderArea.extent.width (%" PRIu32 ") is greater than framebuffer width (%" PRIu32 ").", func_name, pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.extent.width, framebuffer_info->width); } else { skip |= LogError( pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02848", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, renderArea.offset.x " "(%" PRIi32 ") + renderArea.extent.width (%" PRIu32 ") is greater than framebuffer width (%" PRIu32 ").", func_name, pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.extent.width, framebuffer_info->width); } } if ((pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > framebuffer_info->height) { if (device_group) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-pNext-02853", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer and pNext " "of VkRenderPassBeginInfo does not contain VkDeviceGroupRenderPassBeginInfo or its " "deviceRenderAreaCount is 0, renderArea.offset.y (%" PRIi32 ") + renderArea.extent.height (%" PRIu32 ") is greater than framebuffer height (%" PRIu32 ").", func_name, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.height, framebuffer_info->height); } else { skip |= LogError( pRenderPassBegin->renderPass, "VUID-VkRenderPassBeginInfo-renderArea-02849", "%s: Cannot execute a render pass with renderArea not within the bounds of the framebuffer, renderArea.offset.y " "(%" PRIi32 ") + renderArea.extent.height (%" PRIu32 ") is greater than framebuffer height (%" PRIu32 ").", func_name, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.height, framebuffer_info->height); } } } return skip; } bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo, const char *func_name) const { bool skip = false; const VkRenderPassAttachmentBeginInfo *render_pass_attachment_begin_info = LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBeginInfo->pNext); if (render_pass_attachment_begin_info && render_pass_attachment_begin_info->attachmentCount != 0) { auto framebuffer_state = Get<FRAMEBUFFER_STATE>(pRenderPassBeginInfo->framebuffer); const auto *framebuffer_create_info = &framebuffer_state->createInfo; const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info = LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(framebuffer_create_info->pNext); if ((framebuffer_create_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03207", "%s: Image views specified at render pass begin, but framebuffer not created with " "VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT.", func_name); } else if (framebuffer_attachments_create_info) { if (framebuffer_attachments_create_info->attachmentImageInfoCount != render_pass_attachment_begin_info->attachmentCount) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03208", "%s: %u image views specified at render pass begin, but framebuffer " "created expecting %u attachments.", func_name, render_pass_attachment_begin_info->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount); } else { auto render_pass_state = Get<RENDER_PASS_STATE>(pRenderPassBeginInfo->renderPass); const auto *render_pass_create_info = &render_pass_state->createInfo; for (uint32_t i = 0; i < render_pass_attachment_begin_info->attachmentCount; ++i) { const auto image_view_state = Get<IMAGE_VIEW_STATE>(render_pass_attachment_begin_info->pAttachments[i]); const VkImageViewCreateInfo *image_view_create_info = &image_view_state->create_info; const auto &subresource_range = image_view_state->normalized_subresource_range; const VkFramebufferAttachmentImageInfo *framebuffer_attachment_image_info = &framebuffer_attachments_create_info->pAttachmentImageInfos[i]; const auto *image_create_info = &image_view_state->image_state->createInfo; if (framebuffer_attachment_image_info->flags != image_create_info->flags) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03209", "%s: Image view #%u created from an image with flags set as 0x%X, " "but image info #%u used to create the framebuffer had flags set as 0x%X.", func_name, i, image_create_info->flags, i, framebuffer_attachment_image_info->flags); } if (framebuffer_attachment_image_info->usage != image_view_state->inherited_usage) { // Give clearer message if this error is due to the "inherited" part or not if (image_create_info->usage == image_view_state->inherited_usage) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627", "%s: Image view #%u created from an image with usage set as 0x%X, " "but image info #%u used to create the framebuffer had usage set as 0x%X.", func_name, i, image_create_info->usage, i, framebuffer_attachment_image_info->usage); } else { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627", "%s: Image view #%u created from an image with usage set as 0x%X, but " "VkImageViewUsageCreateInfo restricted the inherited usage to the subset 0x%X, " "and the image info #%u used to create the framebuffer had usage set as 0x%X.", func_name, i, image_create_info->usage, image_view_state->inherited_usage, i, framebuffer_attachment_image_info->usage); } } const auto view_width = image_create_info->extent.width >> subresource_range.baseMipLevel; if (framebuffer_attachment_image_info->width != view_width) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03211", "%s: For VkRenderPassAttachmentBeginInfo::pAttachments[%" PRIu32 "], VkImageView width (%" PRIu32 ") at mip level %" PRIu32 " (%" PRIu32 ") != VkFramebufferAttachmentsCreateInfo::pAttachments[%" PRIu32 "]::width (%" PRIu32 ").", func_name, i, image_create_info->extent.width, subresource_range.baseMipLevel, view_width, i, framebuffer_attachment_image_info->width); } const bool is_1d = (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_1D) || (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY); const auto view_height = (!is_1d) ? image_create_info->extent.height >> subresource_range.baseMipLevel : image_create_info->extent.height; if (framebuffer_attachment_image_info->height != view_height) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03212", "%s: For VkRenderPassAttachmentBeginInfo::pAttachments[%" PRIu32 "], VkImageView height (%" PRIu32 ") at mip level %" PRIu32 " (%" PRIu32 ") != VkFramebufferAttachmentsCreateInfo::pAttachments[%" PRIu32 "]::height (%" PRIu32 ").", func_name, i, image_create_info->extent.height, subresource_range.baseMipLevel, view_height, i, framebuffer_attachment_image_info->height); } if (framebuffer_attachment_image_info->layerCount != subresource_range.layerCount) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03213", "%s: Image view #%u created with a subresource range with a layerCount of %u, " "but image info #%u used to create the framebuffer had layerCount set as %u.", func_name, i, subresource_range.layerCount, i, framebuffer_attachment_image_info->layerCount); } const VkImageFormatListCreateInfo *image_format_list_create_info = LvlFindInChain<VkImageFormatListCreateInfo>(image_create_info->pNext); if (image_format_list_create_info) { if (image_format_list_create_info->viewFormatCount != framebuffer_attachment_image_info->viewFormatCount) { skip |= LogError( pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03214", "VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, " "but image info #%u used to create the framebuffer had viewFormatCount set as %u.", i, image_format_list_create_info->viewFormatCount, i, framebuffer_attachment_image_info->viewFormatCount); } for (uint32_t j = 0; j < image_format_list_create_info->viewFormatCount; ++j) { bool format_found = false; for (uint32_t k = 0; k < framebuffer_attachment_image_info->viewFormatCount; ++k) { if (image_format_list_create_info->pViewFormats[j] == framebuffer_attachment_image_info->pViewFormats[k]) { format_found = true; } } if (!format_found) { skip |= 
LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03215", "VkRenderPassBeginInfo: Image view #%u created with an image including the format " "%s in its view format list, " "but image info #%u used to create the framebuffer does not include this format", i, string_VkFormat(image_format_list_create_info->pViewFormats[j]), i); } } } if (render_pass_create_info->pAttachments[i].format != image_view_create_info->format) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03216", "%s: Image view #%u created with a format of %s, " "but render pass attachment description #%u created with a format of %s", func_name, i, string_VkFormat(image_view_create_info->format), i, string_VkFormat(render_pass_create_info->pAttachments[i].format)); } if (render_pass_create_info->pAttachments[i].samples != image_create_info->samples) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03217", "%s: Image view #%u created with an image with %s samples, " "but render pass attachment description #%u created with %s samples", func_name, i, string_VkSampleCountFlagBits(image_create_info->samples), i, string_VkSampleCountFlagBits(render_pass_create_info->pAttachments[i].samples)); } if (subresource_range.levelCount != 1) { skip |= LogError(render_pass_attachment_begin_info->pAttachments[i], "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218", "%s: Image view #%u created with multiple (%u) mip levels.", func_name, i, subresource_range.levelCount); } if (IsIdentitySwizzle(image_view_create_info->components) == false) { skip |= LogError( render_pass_attachment_begin_info->pAttachments[i], "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219", "%s: Image view #%u created with non-identity swizzle. All " "framebuffer attachments must have been created with the identity swizzle. Here are the actual " "swizzle values:\n" "r swizzle = %s\n" "g swizzle = %s\n" "b swizzle = %s\n" "a swizzle = %s\n", func_name, i, string_VkComponentSwizzle(image_view_create_info->components.r), string_VkComponentSwizzle(image_view_create_info->components.g), string_VkComponentSwizzle(image_view_create_info->components.b), string_VkComponentSwizzle(image_view_create_info->components.a)); } if (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_3D) { skip |= LogError(render_pass_attachment_begin_info->pAttachments[i], "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114", "%s: Image view #%u created with type VK_IMAGE_VIEW_TYPE_3D", func_name, i); } } } } } return skip; } // If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the // [load|store]Op flag must be checked // TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately. 
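/* Illustrative example (not part of the validation logic): for a VK_FORMAT_D24_UNORM_S8_UINT attachment with loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE and stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, FormatSpecificLoadAndStoreOpSettings(format, loadOp, stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR) returns true because the format has a stencil aspect and the stencil op matches. For a color format such as VK_FORMAT_R8G8B8A8_UNORM the stencil op is never consulted, so the same arguments would return false. */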
template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) { if (color_depth_op != op && stencil_op != op) { return false; } bool check_color_depth_load_op = !FormatIsStencilOnly(format); bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op; return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op))); } bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo *pRenderPassBegin, CMD_TYPE cmd_type) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); const char *function_name = CommandTypeString(cmd_type); assert(cb_state); if (pRenderPassBegin) { auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass); auto fb_state = Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer); if (rp_state) { uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR // Handle extension struct from EXT_sample_locations const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info = LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext); if (sample_locations_begin_info) { for (uint32_t i = 0; i < sample_locations_begin_info->attachmentInitialSampleLocationsCount; ++i) { const VkAttachmentSampleLocationsEXT &sample_location = sample_locations_begin_info->pAttachmentInitialSampleLocations[i]; skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name); if (sample_location.attachmentIndex >= rp_state->createInfo.attachmentCount) { skip |= LogError(device, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531", "%s: Attachment index %u specified by attachment sample locations %u is greater than the " "attachment count of %u for the render pass being begun.", function_name, sample_location.attachmentIndex, i, rp_state->createInfo.attachmentCount); } } for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) { const VkSubpassSampleLocationsEXT &sample_location = sample_locations_begin_info->pPostSubpassSampleLocations[i]; skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name); if (sample_location.subpassIndex >= rp_state->createInfo.subpassCount) { skip |= LogError( device, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532", "%s: Subpass index %u specified by subpass sample locations %u is greater than the subpass count " "of %u for the render pass being begun.", function_name, sample_location.subpassIndex, i, rp_state->createInfo.subpassCount); } } } for (uint32_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) { auto attachment = &rp_state->createInfo.pAttachments[i]; if (FormatSpecificLoadAndStoreOpSettings(attachment->format, attachment->loadOp, attachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR)) { clear_op_size = static_cast<uint32_t>(i) + 1; if (FormatHasDepth(attachment->format) && pRenderPassBegin->pClearValues) { skip |= ValidateClearDepthStencilValue(commandBuffer, pRenderPassBegin->pClearValues[i].depthStencil, function_name); } } } if (clear_op_size > pRenderPassBegin->clearValueCount) { skip |= LogError(rp_state->renderPass(), "VUID-VkRenderPassBeginInfo-clearValueCount-00902", "In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there " "must be at least %u entries in pClearValues array to account for the highest 
index attachment in " "%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is %u. Note that the pClearValues array is indexed by " "attachment number, so even if some pClearValues entries between 0 and %u correspond to attachments " "that aren't cleared, they will be ignored.", function_name, pRenderPassBegin->clearValueCount, clear_op_size, report_data->FormatHandle(rp_state->renderPass()).c_str(), clear_op_size, clear_op_size - 1); } skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin, function_name); skip |= VerifyRenderAreaBounds(pRenderPassBegin, function_name); skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state.get(), pRenderPassBegin, fb_state.get()); if (fb_state->rp_state->renderPass() != rp_state->renderPass()) { skip |= ValidateRenderPassCompatibility("render pass", rp_state.get(), "framebuffer", fb_state->rp_state.get(), function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904"); } skip |= ValidateDependencies(fb_state.get(), rp_state.get()); skip |= ValidateCmd(cb_state.get(), cmd_type); } } auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext); if (chained_device_group_struct) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905"); skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906"); skip |= ValidateDeviceMaskToCommandBuffer(cb_state.get(), chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907"); if (chained_device_group_struct->deviceRenderAreaCount != 0 && chained_device_group_struct->deviceRenderAreaCount != physical_device_count) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908", "%s: deviceRenderAreaCount[%" PRIu32 "] is invalid. 
Physical device count is %" PRIu32 ".", function_name, chained_device_group_struct->deviceRenderAreaCount, physical_device_count); } } return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) const { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin, CMD_BEGINRENDERPASS); return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) const { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin, CMD_BEGINRENDERPASS2KHR); return skip; } bool CoreChecks::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) const { bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin, CMD_BEGINRENDERPASS2); return skip; } void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassContents contents) { if (!pRenderPassBegin) { return; } auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); auto render_pass_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass); auto framebuffer = Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer); if (render_pass_state) { // transition attachments to the correct layouts for beginning of renderPass and first subpass TransitionBeginRenderPassLayouts(cb_state.get(), render_pass_state.get(), framebuffer.get()); } } void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents); } void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) { StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents); } void CoreChecks::PreCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassBeginInfo *pSubpassBeginInfo) { StateTracker::PreCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer, CMD_TYPE cmd_type) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *function_name = CommandTypeString(cmd_type); skip |= ValidateCmd(cb_state.get(), cmd_type); auto subpass_count = cb_state->activeRenderPass->createInfo.subpassCount; if (cb_state->activeSubpass == subpass_count - 1) { vuid = use_rp2 ? 
"VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909"; skip |= LogError(commandBuffer, vuid, "%s: Attempted to advance beyond final subpass.", function_name); } return skip; } bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer, CMD_NEXTSUBPASS); } bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer, CMD_NEXTSUBPASS2KHR); } bool CoreChecks::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer, CMD_NEXTSUBPASS2); } void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) { auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); auto framebuffer = Get<FRAMEBUFFER_STATE>(cb_state->activeRenderPassBeginInfo.framebuffer); TransitionSubpassLayouts(cb_state.get(), cb_state->activeRenderPass.get(), cb_state->activeSubpass, framebuffer.get()); } void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents); RecordCmdNextSubpassLayouts(commandBuffer, contents); } void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) { StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents); } void CoreChecks::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) { StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer, CMD_TYPE cmd_type) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *function_name = CommandTypeString(cmd_type); RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass.get(); if (rp_state) { if ((cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) && !rp_state->use_dynamic_rendering) { vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-None-03103" : "VUID-vkCmdEndRenderPass-None-00910"; skip |= LogError(commandBuffer, vuid, "%s: Called before reaching final subpass.", function_name); } if (rp_state->use_dynamic_rendering) { vuid = use_rp2 ? 
"VUID-vkCmdEndRenderPass2-None-06171" : "VUID-vkCmdEndRenderPass-None-06170"; skip |= LogError(commandBuffer, vuid, "%s: Called when the render pass instance was begun with vkCmdBeginRenderingKHR().", function_name); } } skip |= ValidateCmd(cb_state.get(), cmd_type); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer, CMD_ENDRENDERPASS); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer, CMD_ENDRENDERPASS2KHR); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer, CMD_ENDRENDERPASS2); return skip; } void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) { auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); TransitionFinalSubpassLayouts(cb_state.get(), cb_state->activeRenderPassBeginInfo.ptr(), cb_state->activeFramebuffer.get()); } void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) { // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need. RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer); } void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) { // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need. RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo); } void CoreChecks::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) { RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo); } bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer, const CMD_BUFFER_STATE *pSubCB, const char *caller) const { bool skip = false; if (!pSubCB->beginInfo.pInheritanceInfo) { return skip; } VkFramebuffer primary_fb = pCB->activeFramebuffer ? 
pCB->activeFramebuffer->framebuffer() : VK_NULL_HANDLE; VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer; if (secondary_fb != VK_NULL_HANDLE) { if (primary_fb != secondary_fb) { LogObjectList objlist(primaryBuffer); objlist.add(secondaryBuffer); objlist.add(secondary_fb); objlist.add(primary_fb); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099", "vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s" " that is not the same as the primary command buffer's current active %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(), report_data->FormatHandle(primary_fb).c_str()); } auto fb = Get<FRAMEBUFFER_STATE>(secondary_fb); if (!fb) { LogObjectList objlist(primaryBuffer); objlist.add(secondaryBuffer); objlist.add(secondary_fb); skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str()); return skip; } } return skip; } bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const { bool skip = false; layer_data::unordered_set<int> active_types; if (!disabled[query_validation]) { for (const auto &query_object : pCB->activeQueries) { auto query_pool_state = Get<QUERY_POOL_STATE>(query_object.pool); if (query_pool_state) { if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS && pSubCB->beginInfo.pInheritanceInfo) { VkQueryPipelineStatisticFlags cmd_buf_statistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics; if ((cmd_buf_statistics & query_pool_state->createInfo.pipelineStatistics) != cmd_buf_statistics) { LogObjectList objlist(pCB->commandBuffer()); objlist.add(query_object.pool); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-00104", "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s" ". 
Pipeline statistics are being queried, so the secondary command buffer must have all bits set on the queryPool.", report_data->FormatHandle(pCB->commandBuffer()).c_str(), report_data->FormatHandle(query_object.pool).c_str()); } } active_types.insert(query_pool_state->createInfo.queryType); } } for (const auto &query_object : pSubCB->startedQueries) { auto query_pool_state = Get<QUERY_POOL_STATE>(query_object.pool); if (query_pool_state && active_types.count(query_pool_state->createInfo.queryType)) { LogObjectList objlist(pCB->commandBuffer()); objlist.add(query_object.pool); skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s" " of type %d, but a query of that type has been started on secondary %s.", report_data->FormatHandle(pCB->commandBuffer()).c_str(), report_data->FormatHandle(query_object.pool).c_str(), query_pool_state->createInfo.queryType, report_data->FormatHandle(pSubCB->commandBuffer()).c_str()); } } } const auto primary_pool = pCB->command_pool; const auto secondary_pool = pSubCB->command_pool; if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) { LogObjectList objlist(pSubCB->commandBuffer()); objlist.add(pCB->commandBuffer()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00094", "vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary " "%s created in queue family %d.", report_data->FormatHandle(pCB->commandBuffer()).c_str(), primary_pool->queueFamilyIndex, report_data->FormatHandle(pSubCB->commandBuffer()).c_str(), secondary_pool->queueFamilyIndex); } return skip; } // Object that simulates the inherited viewport/scissor state as the device executes the called secondary command buffers. // Visit the calling primary command buffer first, then the called secondaries in order. // Contact David Zhao Akeley <[email protected]> for clarifications and bug fixes. class CoreChecks::ViewportScissorInheritanceTracker { static_assert(4 == sizeof(CMD_BUFFER_STATE::viewportMask), "Adjust max_viewports to match viewportMask bit width"); static constexpr uint32_t kMaxViewports = 32, kNotTrashed = uint32_t(-2), kTrashedByPrimary = uint32_t(-1); const ValidationObject &validation_; const CMD_BUFFER_STATE *primary_state_ = nullptr; uint32_t viewport_mask_; uint32_t scissor_mask_; uint32_t viewport_trashed_by_[kMaxViewports]; // filled in VisitPrimary. uint32_t scissor_trashed_by_[kMaxViewports]; VkViewport viewports_to_inherit_[kMaxViewports]; uint32_t viewport_count_to_inherit_; // 0 if viewport count (EXT state) has never been defined (but not trashed) uint32_t scissor_count_to_inherit_; // 0 if scissor count (EXT state) has never been defined (but not trashed) uint32_t viewport_count_trashed_by_; uint32_t scissor_count_trashed_by_; public: ViewportScissorInheritanceTracker(const ValidationObject &validation) : validation_(validation) {} bool VisitPrimary(const CMD_BUFFER_STATE *primary_state) { assert(!primary_state_); primary_state_ = primary_state; viewport_mask_ = primary_state->viewportMask | primary_state->viewportWithCountMask; scissor_mask_ = primary_state->scissorMask | primary_state->scissorWithCountMask; for (uint32_t n = 0; n < kMaxViewports; ++n) { uint32_t bit = uint32_t(1) << n; viewport_trashed_by_[n] = primary_state->trashedViewportMask & bit ? kTrashedByPrimary : kNotTrashed; scissor_trashed_by_[n] = primary_state->trashedScissorMask & bit ? 
kTrashedByPrimary : kNotTrashed; if (viewport_mask_ & bit) { viewports_to_inherit_[n] = primary_state->dynamicViewports[n]; } } viewport_count_to_inherit_ = primary_state->viewportWithCountCount; scissor_count_to_inherit_ = primary_state->scissorWithCountCount; viewport_count_trashed_by_ = primary_state->trashedViewportCount ? kTrashedByPrimary : kNotTrashed; scissor_count_trashed_by_ = primary_state->trashedScissorCount ? kTrashedByPrimary : kNotTrashed; return false; } bool VisitSecondary(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) { bool skip = false; if (secondary_state->inheritedViewportDepths.empty()) { skip |= VisitSecondaryNoInheritance(cmd_buffer_idx, secondary_state); } else { skip |= VisitSecondaryInheritance(cmd_buffer_idx, secondary_state); } // See note at end of VisitSecondaryNoInheritance. if (secondary_state->trashedViewportCount) { viewport_count_trashed_by_ = cmd_buffer_idx; } if (secondary_state->trashedScissorCount) { scissor_count_trashed_by_ = cmd_buffer_idx; } return skip; } private: // Track state inheritance as specified by VK_NV_inherited_viewport_scissor, including states // overwritten to an undefined value by bound pipelines with non-dynamic state. bool VisitSecondaryNoInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) { viewport_mask_ |= secondary_state->viewportMask | secondary_state->viewportWithCountMask; scissor_mask_ |= secondary_state->scissorMask | secondary_state->scissorWithCountMask; for (uint32_t n = 0; n < kMaxViewports; ++n) { uint32_t bit = uint32_t(1) << n; if ((secondary_state->viewportMask | secondary_state->viewportWithCountMask) & bit) { viewports_to_inherit_[n] = secondary_state->dynamicViewports[n]; viewport_trashed_by_[n] = kNotTrashed; } if ((secondary_state->scissorMask | secondary_state->scissorWithCountMask) & bit) { scissor_trashed_by_[n] = kNotTrashed; } if (secondary_state->viewportWithCountCount != 0) { viewport_count_to_inherit_ = secondary_state->viewportWithCountCount; viewport_count_trashed_by_ = kNotTrashed; } if (secondary_state->scissorWithCountCount != 0) { scissor_count_to_inherit_ = secondary_state->scissorWithCountCount; scissor_count_trashed_by_ = kNotTrashed; } // Order of above vs below matters here. if (secondary_state->trashedViewportMask & bit) { viewport_trashed_by_[n] = cmd_buffer_idx; } if (secondary_state->trashedScissorMask & bit) { scissor_trashed_by_[n] = cmd_buffer_idx; } // Check trashing dynamic viewport/scissor count in VisitSecondary (at end) as even secondary command buffers enabling // viewport/scissor state inheritance may define this state statically in bound graphics pipelines. } return false; } // Validate needed inherited state as specified by VK_NV_inherited_viewport_scissor. bool VisitSecondaryInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) { bool skip = false; uint32_t check_viewport_count = 0, check_scissor_count = 0; // Common code for reporting missing inherited state (for a myriad of reasons). 
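// Sketch of the decision logic in check_missing_inherit below: if the state was defined and never
// trashed, the only remaining failure mode is an inherited viewport whose depth range does not match
// the secondary's expected pViewportDepths entry; otherwise the error text distinguishes never-defined
// state from state trashed by the primary (kTrashedByPrimary) or by an earlier secondary (its index).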
auto check_missing_inherit = [&](uint32_t was_ever_defined, uint32_t trashed_by, VkDynamicState state, uint32_t index = 0, uint32_t static_use_count = 0, const VkViewport *inherited_viewport = nullptr, const VkViewport *expected_viewport_depth = nullptr) { if (was_ever_defined && trashed_by == kNotTrashed) { if (state != VK_DYNAMIC_STATE_VIEWPORT) return false; assert(inherited_viewport != nullptr && expected_viewport_depth != nullptr); if (inherited_viewport->minDepth != expected_viewport_depth->minDepth || inherited_viewport->maxDepth != expected_viewport_depth->maxDepth) { return validation_.LogError( primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701", "vkCmdExecuteCommands(): Draw commands in pCommandBuffers[%u] (%s) consume inherited viewport %u %s" "but this state was not inherited as its depth range [%f, %f] does not match " "pViewportDepths[%u] = [%f, %f]", unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str(), unsigned(index), index >= static_use_count ? "(with count) " : "", inherited_viewport->minDepth, inherited_viewport->maxDepth, unsigned(cmd_buffer_idx), expected_viewport_depth->minDepth, expected_viewport_depth->maxDepth); // akeley98 note: This VUID is not ideal; however, there isn't a more relevant VUID as // it isn't illegal in itself to have mismatched inherited viewport depths. // The error only occurs upon attempting to consume the viewport. } else { return false; } } const char *state_name; bool format_index = false; switch (state) { case VK_DYNAMIC_STATE_SCISSOR: state_name = "scissor"; format_index = true; break; case VK_DYNAMIC_STATE_VIEWPORT: state_name = "viewport"; format_index = true; break; case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT: state_name = "dynamic viewport count"; break; case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT: state_name = "dynamic scissor count"; break; default: assert(0); state_name = "<unknown state, report bug>"; break; } std::stringstream ss; ss << "vkCmdExecuteCommands(): Draw commands in pCommandBuffers[" << cmd_buffer_idx << "] (" << validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str() << ") consume inherited " << state_name << " "; if (format_index) { if (index >= static_use_count) { ss << "(with count) "; } ss << index << " "; } ss << "but this state "; if (!was_ever_defined) { ss << "was never defined."; } else if (trashed_by == kTrashedByPrimary) { ss << "was left undefined after vkCmdExecuteCommands or vkCmdBindPipeline (with non-dynamic state) in " "the calling primary command buffer."; } else { ss << "was left undefined after vkCmdBindPipeline (with non-dynamic state) in pCommandBuffers[" << trashed_by << "]."; } return validation_.LogError(primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701", "%s", ss.str().c_str()); }; // Check if secondary command buffer uses viewport/scissor-with-count state, and validate this state if so. 
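// Example scenario (illustrative only, assuming VK_NV_inherited_viewport_scissor is enabled):
//   vkCmdSetViewportWithCountEXT(primary, 2, viewports);  // viewport_count_to_inherit_ becomes 2
//   vkCmdExecuteCommands(primary, 1, &secondary);         // a secondary using dynamic viewport count inherits 2
// If the primary instead bound a pipeline with a static viewport count, the count is "trashed"
// (viewport_count_trashed_by_ != kNotTrashed) and any consumption here is reported via check_missing_inherit.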
if (secondary_state->usedDynamicViewportCount) { if (viewport_count_to_inherit_ == 0 || viewport_count_trashed_by_ != kNotTrashed) { skip |= check_missing_inherit(viewport_count_to_inherit_, viewport_count_trashed_by_, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT); } else { check_viewport_count = viewport_count_to_inherit_; } } if (secondary_state->usedDynamicScissorCount) { if (scissor_count_to_inherit_ == 0 || scissor_count_trashed_by_ != kNotTrashed) { skip |= check_missing_inherit(scissor_count_to_inherit_, scissor_count_trashed_by_, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT); } else { check_scissor_count = scissor_count_to_inherit_; } } // Check the maximum of (viewports used by pipelines with static viewport count, "" dynamic viewport count) // but limit to length of inheritedViewportDepths array and uint32_t bit width (validation layer limit). check_viewport_count = std::min(std::min(kMaxViewports, uint32_t(secondary_state->inheritedViewportDepths.size())), std::max(check_viewport_count, secondary_state->usedViewportScissorCount)); check_scissor_count = std::min(kMaxViewports, std::max(check_scissor_count, secondary_state->usedViewportScissorCount)); if (secondary_state->usedDynamicViewportCount && viewport_count_to_inherit_ > secondary_state->inheritedViewportDepths.size()) { skip |= validation_.LogError( primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701", "vkCmdExecuteCommands(): " "Draw commands in pCommandBuffers[%u] (%s) consume inherited dynamic viewport with count state " "but the dynamic viewport count (%u) exceeds the inheritance limit (viewportDepthCount=%u).", unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str(), unsigned(viewport_count_to_inherit_), unsigned(secondary_state->inheritedViewportDepths.size())); } for (uint32_t n = 0; n < check_viewport_count; ++n) { skip |= check_missing_inherit(viewport_mask_ & uint32_t(1) << n, viewport_trashed_by_[n], VK_DYNAMIC_STATE_VIEWPORT, n, secondary_state->usedViewportScissorCount, &viewports_to_inherit_[n], &secondary_state->inheritedViewportDepths[n]); } for (uint32_t n = 0; n < check_scissor_count; ++n) { skip |= check_missing_inherit(scissor_mask_ & uint32_t(1) << n, scissor_trashed_by_[n], VK_DYNAMIC_STATE_SCISSOR, n, secondary_state->usedViewportScissorCount); } return skip; } }; constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kMaxViewports; constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kNotTrashed; constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kTrashedByPrimary; bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; layer_data::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers; ViewportScissorInheritanceTracker viewport_scissor_inheritance{*this}; if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) { skip |= viewport_scissor_inheritance.VisitPrimary(cb_state.get()); } bool active_occlusion_query = false; for (const auto& active_query : cb_state->activeQueries) { const auto query_pool_state = Get<QUERY_POOL_STATE>(active_query.pool); if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_OCCLUSION) { active_occlusion_query = true; break; } } if (cb_state->activeRenderPass) { if ((cb_state->activeRenderPass->use_dynamic_rendering == false) && 
(cb_state->activeSubpassContents != VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS)) { skip |= LogError(commandBuffer, "VUID-vkCmdExecuteCommands-contents-06018", "vkCmdExecuteCommands(): contents must be set to VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS " "when calling vkCmdExecuteCommands() within a render pass instance begun with " "vkCmdBeginRenderPass()."); } if ((cb_state->activeRenderPass->use_dynamic_rendering == true) && !(cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.flags & VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR)) { skip |= LogError(commandBuffer, "VUID-vkCmdExecuteCommands-flags-06024", "vkCmdExecuteCommands(): VkRenderingInfoKHR::flags must include " "VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR when calling vkCmdExecuteCommands() within a " "render pass instance begun with vkCmdBeginRenderingKHR()."); } } for (uint32_t i = 0; i < commandBuffersCount; i++) { const auto sub_cb_state = Get<CMD_BUFFER_STATE>(pCommandBuffers[i]); assert(sub_cb_state); if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) { skip |= viewport_scissor_inheritance.VisitSecondary(i, sub_cb_state.get()); } if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00088", "vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All " "cmd buffers in pCommandBuffers array must be secondary.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), i); } else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) { if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) { const auto secondary_rp_state = Get<RENDER_PASS_STATE>(sub_cb_state->beginInfo.pInheritanceInfo->renderPass); if (cb_state->activeRenderPass && !(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00096", "vkCmdExecuteCommands(): Secondary %s is executed within a %s " "instance scope, but the Secondary Command Buffer does not have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } else if (!cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00100", "vkCmdExecuteCommands(): Secondary %s is executed outside a render pass " "instance scope, but the Secondary Command Buffer does have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str()); } else if (cb_state->activeRenderPass && (cb_state->activeRenderPass->use_dynamic_rendering == false) && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // Make sure render pass is compatible with parent command buffer pass if has continue if (cb_state->activeRenderPass->renderPass() != secondary_rp_state->renderPass()) { skip |= ValidateRenderPassCompatibility( "primary command buffer", cb_state->activeRenderPass.get(), "secondary command buffer", 
secondary_rp_state.get(), "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pBeginInfo-06020"); } // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB skip |= ValidateFramebuffer(commandBuffer, cb_state.get(), pCommandBuffers[i], sub_cb_state.get(), "vkCmdExecuteCommands()"); if (!sub_cb_state->cmd_execute_commands_functions.empty()) { // Inherit primary's activeFramebuffer and while running validate functions for (auto &function : sub_cb_state->cmd_execute_commands_functions) { skip |= function(*sub_cb_state, cb_state.get(), cb_state->activeFramebuffer.get()); } } } if (cb_state->activeRenderPass && (cb_state->activeRenderPass->use_dynamic_rendering == false) && (cb_state->activeSubpass != sub_cb_state->beginInfo.pInheritanceInfo->subpass)) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-06019", "vkCmdExecuteCommands(): Secondary %s is executed within a %s " "instance scope begun by vkCmdBeginRenderPass(), but " "VkCommandBufferInheritanceInfo::subpass (%u) does not " "match the current subpass (%u).", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str(), sub_cb_state->beginInfo.pInheritanceInfo->subpass, cb_state->activeSubpass); } else if (cb_state->activeRenderPass && (cb_state->activeRenderPass->use_dynamic_rendering == true)) { if (sub_cb_state->beginInfo.pInheritanceInfo->renderPass != VK_NULL_HANDLE) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pBeginInfo-06025", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " "VkCommandBufferInheritanceInfo::pInheritanceInfo::renderPass is not VK_NULL_HANDLE.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } if (sub_cb_state->activeRenderPass->use_dynamic_rendering_inherited) { if (sub_cb_state->activeRenderPass->inheritance_rendering_info.flags != (cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.flags & ~VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR)) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-flags-06026", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but VkCommandBufferInheritanceRenderingInfoKHR::flags (%u) does " "not match VkRenderingInfoKHR::flags (%u), excluding " "VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str(), sub_cb_state->activeRenderPass->inheritance_rendering_info.flags, cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.flags); } if (sub_cb_state->activeRenderPass->inheritance_rendering_info.colorAttachmentCount != cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-colorAttachmentCount-06027", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by 
vkCmdBeginRenderingKHR(), but " "VkCommandBufferInheritanceRenderingInfoKHR::colorAttachmentCount (%u) does " "not match VkRenderingInfoKHR::colorAttachmentCount (%u).", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str(), sub_cb_state->activeRenderPass->inheritance_rendering_info.colorAttachmentCount, cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount); } for (uint32_t index = 0; index < cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount; index++) { if (cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[index] .imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>( cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[index] .imageView); if (image_view_state->create_info.format != sub_cb_state->activeRenderPass->inheritance_rendering_info.pColorAttachmentFormats[index]) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-imageView-06028", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " "VkCommandBufferInheritanceRenderingInfoKHR::pColorAttachmentFormats at index (%u) does " "not match the format of the imageView in VkRenderingInfoKHR::pColorAttachments.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str(), index); } } } if ((cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment != nullptr) && cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>( cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView); if (image_view_state->create_info.format != sub_cb_state->activeRenderPass->inheritance_rendering_info.depthAttachmentFormat) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pDepthAttachment-06029", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " "VkCommandBufferInheritanceRenderingInfoKHR::depthAttachmentFormat does " "not match the format of the imageView in VkRenderingInfoKHR::pDepthAttachment.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } } if ((cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment != nullptr) && cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment->imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>( cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment->imageView); if (image_view_state->create_info.format != sub_cb_state->activeRenderPass->inheritance_rendering_info.stencilAttachmentFormat) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pStencilAttachment-06030", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " 
"VkCommandBufferInheritanceRenderingInfoKHR::stencilAttachmentFormat does " "not match the format of the imageView in VkRenderingInfoKHR::pStencilAttachment.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } } if (cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.viewMask != sub_cb_state->activeRenderPass->inheritance_rendering_info.viewMask) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-viewMask-06031", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " "VkCommandBufferInheritanceRenderingInfoKHR::viewMask (%u) does " "not match VkRenderingInfoKHR::viewMask (%u).", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str(), sub_cb_state->activeRenderPass->inheritance_rendering_info.viewMask, cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.viewMask); } // VkAttachmentSampleCountInfoAMD == VkAttachmentSampleCountInfoNV const auto amd_sample_count = LvlFindInChain<VkAttachmentSampleCountInfoAMD>( sub_cb_state->activeRenderPass->inheritance_rendering_info.pNext); if (amd_sample_count) { for (uint32_t index = 0; index < cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount; index++) { if (cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[index] .imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>( cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[index] .imageView); if (image_view_state->samples != amd_sample_count->pColorAttachmentSamples[index]) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-pNext-06032", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " "VkAttachmentSampleCountInfo(AMD/NV)::pColorAttachmentSamples at index (%u) " "does " "not match the sample count of the imageView in VkRenderingInfoKHR::pColorAttachments.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str(), index); } } } if ((cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment != nullptr) && cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>( cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView); if (image_view_state->samples != amd_sample_count->depthStencilAttachmentSamples) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-pNext-06033", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " "VkAttachmentSampleCountInfo(AMD/NV)::depthStencilAttachmentSamples does " "not match the sample count of the imageView in VkRenderingInfoKHR::pDepthAttachment.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } } if 
((cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment != nullptr) && cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment->imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>(cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info .pStencilAttachment->imageView); if (image_view_state->samples != amd_sample_count->depthStencilAttachmentSamples) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-pNext-06034", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but " "VkAttachmentSampleCountInfo(AMD/NV)::depthStencilAttachmentSamples does " "not match the sample count of the imageView in VkRenderingInfoKHR::pStencilAttachment.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } } } else { for (uint32_t index = 0; index < cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.colorAttachmentCount; index++) { if (cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[index] .imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>( cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pColorAttachments[index] .imageView); if (image_view_state->samples != sub_cb_state->activeRenderPass->inheritance_rendering_info.rasterizationSamples) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-pNext-06035", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but the sample count of the image view at index (%u) of " "VkRenderingInfoKHR::pColorAttachments does not match " "VkCommandBufferInheritanceRenderingInfoKHR::rasterizationSamples.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str(), index); } } } if ((cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment != nullptr) && cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView != VK_NULL_HANDLE) { auto image_view_state = Get<IMAGE_VIEW_STATE>( cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pDepthAttachment->imageView); if (image_view_state->samples != sub_cb_state->activeRenderPass->inheritance_rendering_info.rasterizationSamples) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-pNext-06036", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but the sample count of the image view for " "VkRenderingInfoKHR::pDepthAttachment does not match " "VkCommandBufferInheritanceRenderingInfoKHR::rasterizationSamples.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } } if ((cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment != nullptr) && cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info.pStencilAttachment->imageView != VK_NULL_HANDLE) { auto image_view_state = 
Get<IMAGE_VIEW_STATE>(cb_state->activeRenderPass->dynamic_rendering_begin_rendering_info .pStencilAttachment->imageView); if (image_view_state->samples != sub_cb_state->activeRenderPass->inheritance_rendering_info.rasterizationSamples) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-pNext-06037", "vkCmdExecuteCommands(): Secondary %s is executed within a %s instance scope begun " "by vkCmdBeginRenderingKHR(), but the sample count of the image view for " "VkRenderingInfoKHR::pStencilAttachment does not match " "VkCommandBufferInheritanceRenderingInfoKHR::rasterizationSamples.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } } } } } } } // TODO(mlentine): Move more logic into this method skip |= ValidateSecondaryCommandBufferState(cb_state.get(), sub_cb_state.get()); skip |= ValidateCommandBufferState(sub_cb_state.get(), "vkCmdExecuteCommands()", 0, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089"); if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { if (sub_cb_state->InUse()) { skip |= LogError( cb_state->commandBuffer(), "VUID-vkCmdExecuteCommands-pCommandBuffers-00091", "vkCmdExecuteCommands(): Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } // We use a const_cast, because one cannot query a container keyed on a non-const pointer using a const pointer if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state.get()))) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(sub_cb_state->commandBuffer()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00092", "vkCmdExecuteCommands(): Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT " "set if previously executed in %s.", report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str(), report_data->FormatHandle(cb_state->commandBuffer()).c_str()); } const auto insert_pair = linked_command_buffers.insert(sub_cb_state.get()); if (!insert_pair.second) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdExecuteCommands-pCommandBuffers-00093", "vkCmdExecuteCommands(): Cannot duplicate %s in pCommandBuffers without " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) { // Warn that executing a non-simultaneous secondary command buffer causes the primary to be treated as non-simultaneous LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->commandBuffer()); skip |= LogWarning(objlist, kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse, "vkCmdExecuteCommands(): Secondary %s does not have " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary " "%s to be treated as if it does not have " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->commandBuffer()).c_str()); } } if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00101", "vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and " "inherited queries are not supported on this device.", 
report_data->FormatHandle(pCommandBuffers[i]).c_str()); } // Validate initial layout uses vs. the primary cmd buffer state // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001" // initial layout usage of the secondary command buffer's resources must match the parent command buffer const auto const_cb_state = std::static_pointer_cast<const CMD_BUFFER_STATE>(cb_state); for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) { const auto *image_state = sub_layout_map_entry.first; const auto image = image_state->image(); const auto *cb_subres_map = const_cb_state->GetImageSubresourceLayoutMap(*image_state); // Const getter can be null in which case we have nothing to check against for this image... if (!cb_subres_map) continue; const auto *sub_cb_subres_map = &sub_layout_map_entry.second; // Validate the initial_uses, that they match the current state of the primary cb, or absent a current state, // that they match any initial_layout. for (const auto &subres_layout : *sub_cb_subres_map) { const auto &sub_layout = subres_layout.initial_layout; const auto &subresource = subres_layout.subresource; if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial // Look up the layout to compare to the initial layout of the sub command buffer (current else initial) const auto *cb_layouts = cb_subres_map->GetSubresourceLayouts(subresource); auto cb_layout = cb_layouts ? cb_layouts->current_layout : kInvalidLayout; const char *layout_type = "current"; if (cb_layout == kInvalidLayout) { cb_layout = cb_layouts ? cb_layouts->initial_layout : kInvalidLayout; layout_type = "initial"; } if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) { skip |= LogError(pCommandBuffers[i], "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001", "%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X, array layer %u, " "mip level %u) which expects layout %s--instead, image %s layout is %s.", "vkCmdExecuteCommands()", report_data->FormatHandle(image).c_str(), subresource.aspectMask, subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type, string_VkImageLayout(cb_layout)); } } } // All command buffers involved must be either all protected or all unprotected if ((cb_state->unprotected == false) && (sub_cb_state->unprotected == true)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(sub_cb_state->commandBuffer()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01820", "vkCmdExecuteCommands(): command buffer %s is protected while secondary command buffer %s is unprotected.", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } else if ((cb_state->unprotected == true) && (sub_cb_state->unprotected == false)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(sub_cb_state->commandBuffer()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01821", "vkCmdExecuteCommands(): command buffer %s is unprotected while secondary command buffer %s is protected.", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } if (active_occlusion_query && sub_cb_state->inheritanceInfo.occlusionQueryEnable != VK_TRUE) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00102", "vkCmdExecuteCommands(): command buffer %s has an active occlusion query, but secondary command " 
"buffer %s was recorded with VkCommandBufferInheritanceInfo::occlusionQueryEnable set to VK_FALSE", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } } skip |= ValidateCmd(cb_state.get(), CMD_EXECUTECOMMANDS); return skip; } bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) const { bool skip = false; const auto mem_info = Get<DEVICE_MEMORY_STATE>(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { skip = LogError(mem, "VUID-vkMapMemory-memory-00682", "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.", report_data->FormatHandle(mem).c_str()); } if (mem_info->multi_instance) { skip = LogError(mem, "VUID-vkMapMemory-memory-00683", "Memory (%s) must not have been allocated with multiple instances -- either by supplying a deviceMask " "with more than one bit set, or by allocation from a heap with the MULTI_INSTANCE heap flag set.", report_data->FormatHandle(mem).c_str()); } skip |= ValidateMapMemRange(mem_info.get(), offset, size); } return skip; } bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const { bool skip = false; const auto mem_info = Get<DEVICE_MEMORY_STATE>(mem); if (mem_info && !mem_info->mapped_range.size) { // Valid Usage: memory must currently be mapped skip |= LogError(mem, "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.", report_data->FormatHandle(mem).c_str()); } return skip; } bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const { bool skip = false; for (uint32_t i = 0; i < memRangeCount; ++i) { auto mem_info = Get<DEVICE_MEMORY_STATE>(pMemRanges[i].memory); if (mem_info) { // Makes sure the memory is already mapped if (mem_info->mapped_range.size == 0) { skip = LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-memory-00684", "%s: Attempting to use memory (%s) that is not currently host mapped.", funcName, report_data->FormatHandle(pMemRanges[i].memory).c_str()); } if (pMemRanges[i].size == VK_WHOLE_SIZE) { if (mem_info->mapped_range.offset > pMemRanges[i].offset) { skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00686", "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mapped_range.offset)); } } else { const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE) ? 
                const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mapped_range.offset + mem_info->mapped_range.size);
                if ((mem_info->mapped_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00685",
                                     "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
                                     ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
                                     funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                     static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) const {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        const uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize;
        const VkDeviceSize offset = mem_ranges[i].offset;
        const VkDeviceSize size = mem_ranges[i].size;

        if (SafeModulo(offset, atom_size) != 0) {
            skip |= LogError(mem_ranges[i].memory, "VUID-VkMappedMemoryRange-offset-00687",
                             "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                             func_name, i, offset, atom_size);
        }
        auto mem_info = Get<DEVICE_MEMORY_STATE>(mem_ranges[i].memory);
        if (mem_info) {
            const auto allocation_size = mem_info->alloc_info.allocationSize;
            if (size == VK_WHOLE_SIZE) {
                const auto mapping_offset = mem_info->mapped_range.offset;
                const auto mapping_size = mem_info->mapped_range.size;
                const auto mapping_end = ((mapping_size == VK_WHOLE_SIZE) ? allocation_size : mapping_offset + mapping_size);
                if (SafeModulo(mapping_end, atom_size) != 0 && mapping_end != allocation_size) {
                    skip |= LogError(mem_ranges[i].memory, "VUID-VkMappedMemoryRange-size-01389",
                                     "%s: Size in pMemRanges[%d] is VK_WHOLE_SIZE and the mapping end (0x%" PRIxLEAST64
                                     " = 0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64
                                     ") is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64
                                     ") and is not equal to the end of the memory object (0x%" PRIxLEAST64 ").",
                                     func_name, i, mapping_end, mapping_offset, mapping_size, atom_size, allocation_size);
                }
            } else {
                const auto range_end = size + offset;
                if (range_end != allocation_size && SafeModulo(size, atom_size) != 0) {
                    skip |= LogError(mem_ranges[i].memory, "VUID-VkMappedMemoryRange-size-01390",
                                     "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                                     ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64
                                     ") and offset + size (0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64 " = 0x%" PRIxLEAST64
                                     ") is not equal to the memory size (0x%" PRIxLEAST64 ").",
                                     func_name, i, size, atom_size, offset, size, range_end, allocation_size);
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                        const VkMappedMemoryRange *pMemRanges) const {
    bool skip = false;
    skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    return skip;
}

bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                             const VkMappedMemoryRange *pMemRanges) const {
    bool skip = false;
    skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    return skip;
}
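// Memory commitment can only be queried for memory types that include
// VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; the check below enforces exactly that.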
bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem,
                                                          VkDeviceSize *pCommittedMem) const {
    bool skip = false;
    const auto mem_info = Get<DEVICE_MEMORY_STATE>(mem);

    if (mem_info) {
        if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
            skip |= LogError(mem, "VUID-vkGetDeviceMemoryCommitment-memory-00690",
                             "vkGetDeviceMemoryCommitment(): Querying commitment for memory without "
                             "VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.",
                             report_data->FormatHandle(mem).c_str());
        }
    }
    return skip;
}

bool CoreChecks::ValidateBindImageMemory(uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos,
                                         const char *api_name) const {
    bool skip = false;

    bool bind_image_mem_2 = strcmp(api_name, "vkBindImageMemory()") != 0;
    char error_prefix[128];
    strcpy(error_prefix, api_name);

    // Track all image sub resources if they are bound for bind_image_mem_2
    // uint32_t[3] is which index in pBindInfos for max 3 planes
    // Non disjoint images act as a single plane
    layer_data::unordered_map<VkImage, std::array<uint32_t, 3>> resources_bound;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        if (bind_image_mem_2 == true) {
            sprintf(error_prefix, "%s pBindInfos[%u]", api_name, i);
        }

        const VkBindImageMemoryInfo &bind_info = pBindInfos[i];
        const auto image_state = Get<IMAGE_STATE>(bind_info.image);
        if (image_state) {
            // Track objects tied to memory
            skip |= ValidateSetMemBinding(bind_info.memory, *image_state, error_prefix);

            const auto plane_info = LvlFindInChain<VkBindImagePlaneMemoryInfo>(bind_info.pNext);
            const auto mem_info = Get<DEVICE_MEMORY_STATE>(bind_info.memory);

            // Need extra check for disjoint flag in case called without bindImage2 and don't want false positive errors
            // no 'else' case as if that happens another VUID is already being triggered for it being invalid
            if ((plane_info == nullptr) && (image_state->disjoint == false)) {
                // Check non-disjoint images VkMemoryRequirements

                // All validation using the image_state->requirements for external AHB is checked in the Android-only section
                if (image_state->IsExternalAHB() == false) {
                    const VkMemoryRequirements &mem_req = image_state->requirements[0];

                    // Validate memory requirements alignment
                    if (SafeModulo(bind_info.memoryOffset, mem_req.alignment) != 0) {
                        const char *validation_error;
                        if (bind_image_mem_2 == false) {
                            validation_error = "VUID-vkBindImageMemory-memoryOffset-01048";
                        } else if (IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
                            validation_error = "VUID-VkBindImageMemoryInfo-pNext-01616";
                        } else {
                            validation_error = "VUID-VkBindImageMemoryInfo-memoryOffset-01613";
                        }
                        skip |= LogError(bind_info.image, validation_error,
                                         "%s: memoryOffset is 0x%" PRIxLEAST64
                                         " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                                         ", returned from a call to vkGetImageMemoryRequirements with image.",
                                         error_prefix, bind_info.memoryOffset, mem_req.alignment);
                    }

                    if (mem_info) {
                        safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
                        // Validate memory requirements size
                        if (mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-size-01049";
                            } else if (IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
                                validation_error = "VUID-VkBindImageMemoryInfo-pNext-01617";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-01614";
                            }
                            skip |= LogError(bind_info.image, validation_error,
                                             "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                             " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                             ", returned from a call to vkGetImageMemoryRequirements with image.",
                                             error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, mem_req.size);
                        }

                        // Validate memory type used
                        {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-memory-01047";
                            } else if (IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
                                validation_error = "VUID-VkBindImageMemoryInfo-pNext-01615";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-01612";
                            }
                            skip |= ValidateMemoryTypes(mem_info.get(), mem_req.memoryTypeBits, error_prefix, validation_error);
                        }
                    }
                }

                if (bind_image_mem_2 == true) {
                    // since it's a non-disjoint image, finding VkImage in map is a duplicate
                    auto it = resources_bound.find(image_state->image());
                    if (it == resources_bound.end()) {
                        std::array<uint32_t, 3> bound_index = {i, UINT32_MAX, UINT32_MAX};
                        resources_bound.emplace(image_state->image(), bound_index);
                    } else {
                        skip |= LogError(
                            bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
                            "%s: The same non-disjoint image resource is being bound twice at pBindInfos[%d] and pBindInfos[%d]",
                            error_prefix, it->second[0], i);
                    }
                }
            } else if ((plane_info != nullptr) && (image_state->disjoint == true)) {
                // Check disjoint images VkMemoryRequirements for given plane
                int plane = 0;

                // All validation using the image_state->plane*_requirements for external AHB is checked in the Android-only section
                if (image_state->IsExternalAHB() == false) {
                    const VkImageAspectFlagBits aspect = plane_info->planeAspect;
                    switch (aspect) {
                        case VK_IMAGE_ASPECT_PLANE_0_BIT:
                            plane = 0;
                            break;
                        case VK_IMAGE_ASPECT_PLANE_1_BIT:
                            plane = 1;
                            break;
                        case VK_IMAGE_ASPECT_PLANE_2_BIT:
                            plane = 2;
                            break;
                        default:
                            assert(false);  // parameter validation should have caught this
                            break;
                    }
                    const VkMemoryRequirements &disjoint_mem_req = image_state->requirements[plane];

                    // Validate memory requirements alignment
                    if (SafeModulo(bind_info.memoryOffset, disjoint_mem_req.alignment) != 0) {
                        skip |= LogError(
                            bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01620",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.",
                            error_prefix, bind_info.memoryOffset, disjoint_mem_req.alignment,
                            string_VkImageAspectFlagBits(aspect));
                    }

                    if (mem_info) {
                        safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;

                        // Validate memory requirements size
                        if (disjoint_mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
                            skip |= LogError(
                                bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01621",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetImageMemoryRequirements with disjoint image for aspect plane %s.",
                                error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, disjoint_mem_req.size,
                                string_VkImageAspectFlagBits(aspect));
                        }

                        // Validate memory type used
                        {
                            skip |= ValidateMemoryTypes(mem_info.get(), disjoint_mem_req.memoryTypeBits, error_prefix,
                                                        "VUID-VkBindImageMemoryInfo-pNext-01619");
                        }
                    }
                }
                auto it = resources_bound.find(image_state->image());
                if (it == resources_bound.end()) {
                    std::array<uint32_t, 3> bound_index = {UINT32_MAX, UINT32_MAX, UINT32_MAX};
                    bound_index[plane] = i;
                    resources_bound.emplace(image_state->image(), bound_index);
                } else {
                    if (it->second[plane] == UINT32_MAX) {
                        it->second[plane] = i;
                    } else {
                        skip |= LogError(bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
                                         "%s: The same disjoint image sub-resource for plane %d is being bound twice at "
                                         "pBindInfos[%d] and pBindInfos[%d]",
                                         error_prefix, plane, it->second[plane], i);
                    }
                }
            }

            if (mem_info) {
                // Validate bound memory range information
                // if memory is exported to an AHB then the mem_info->allocationSize must be zero and this check is not needed
                if ((mem_info->IsExport() == false) ||
                    ((mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ==
                     0)) {
                    skip |= ValidateInsertImageMemoryRange(bind_info.image, mem_info.get(), bind_info.memoryOffset, error_prefix);
                }

                // Validate dedicated allocation
                if (mem_info->IsDedicatedImage()) {
                    if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) {
                        const auto current_image_state = Get<IMAGE_STATE>(bind_info.image);
                        if ((bind_info.memoryOffset != 0) || !current_image_state ||
                            !current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible(
                                mem_info->dedicated->create_info.image)) {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-memory-02629";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-02629";
                            }
                            LogObjectList objlist(bind_info.image);
                            objlist.add(bind_info.memory);
                            objlist.add(mem_info->dedicated->handle);
                            skip |= LogError(
                                objlist, validation_error,
                                "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be compatible "
                                "with %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
                                report_data->FormatHandle(mem_info->dedicated->handle).c_str(),
                                report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
                        }
                    } else {
                        if ((bind_info.memoryOffset != 0) || (mem_info->dedicated->handle.Cast<VkImage>() != bind_info.image)) {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-memory-01509";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-01509";
                            }
                            LogObjectList objlist(bind_info.image);
                            objlist.add(bind_info.memory);
                            objlist.add(mem_info->dedicated->handle);
                            skip |=
                                LogError(objlist, validation_error,
                                         "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be equal "
                                         "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                         error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
                                         report_data->FormatHandle(mem_info->dedicated->handle).c_str(),
                                         report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
                        }
                    }
                }

                // Validate export memory handles
                if ((mem_info->export_handle_type_flags != 0) &&
                    ((mem_info->export_handle_type_flags & image_state->external_memory_handle) == 0)) {
                    const char *vuid = bind_image_mem_2 ?
"VUID-VkBindImageMemoryInfo-memory-02728" : "VUID-vkBindImageMemory-memory-02728"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least " "one handle from VkImage (%s) handleType %s.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(), report_data->FormatHandle(bind_info.image).c_str(), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str()); } // Validate import memory handles if (mem_info->IsImportAHB() == true) { skip |= ValidateImageImportedHandleANDROID(api_name, image_state->external_memory_handle, bind_info.memory, bind_info.image); } else if (mem_info->IsImport() == true) { if ((mem_info->import_handle_type_flags & image_state->external_memory_handle) == 0) { const char *vuid = nullptr; if ((bind_image_mem_2) && IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindImageMemoryInfo-memory-02989"; } else if ((!bind_image_mem_2) && IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindImageMemory-memory-02989"; } else if ((bind_image_mem_2) && !IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindImageMemoryInfo-memory-02729"; } else if ((!bind_image_mem_2) && !IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindImageMemory-memory-02729"; } LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s " "which is not set in the VkImage (%s) VkExternalMemoryImageCreateInfo::handleType (%s)", api_name, report_data->FormatHandle(bind_info.memory).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(), report_data->FormatHandle(bind_info.image).c_str(), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str()); } } // Validate mix of protected buffer and memory if ((image_state->unprotected == false) && (mem_info->unprotected == true)) { const char *vuid = bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01901" : "VUID-vkBindImageMemory-None-01901"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkImage (%s) was " "set to use protected memory.", api_name, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(bind_info.image).c_str()); } else if ((image_state->unprotected == true) && (mem_info->unprotected == false)) { const char *vuid = bind_image_mem_2 ? 
"VUID-VkBindImageMemoryInfo-None-01902" : "VUID-vkBindImageMemory-None-01902"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with protected memory but the VkImage (%s) was not " "set to use protected memory.", api_name, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(bind_info.image).c_str()); } } const auto swapchain_info = LvlFindInChain<VkBindImageMemorySwapchainInfoKHR>(bind_info.pNext); if (swapchain_info) { if (bind_info.memory != VK_NULL_HANDLE) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str()); } if (image_state->create_from_swapchain != swapchain_info->swapchain) { LogObjectList objlist(image_state->image()); objlist.add(image_state->create_from_swapchain); objlist.add(swapchain_info->swapchain); skip |= LogError( objlist, kVUID_Core_BindImageMemory_Swapchain, "%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same " "swapchain", error_prefix, report_data->FormatHandle(image_state->image()).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str(), report_data->FormatHandle(swapchain_info->swapchain).c_str()); } const auto swapchain_state = Get<SWAPCHAIN_NODE>(swapchain_info->swapchain); if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644", "%s: imageIndex (%i) is out of bounds of %s images (size: %i)", error_prefix, swapchain_info->imageIndex, report_data->FormatHandle(swapchain_info->swapchain).c_str(), static_cast<int>(swapchain_state->images.size())); } } else { if (image_state->create_from_swapchain) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-image-01630", "%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.", error_prefix); } if (!mem_info) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str()); } } const auto bind_image_memory_device_group_info = LvlFindInChain<VkBindImageMemoryDeviceGroupInfo>(bind_info.pNext); if (bind_image_memory_device_group_info && bind_image_memory_device_group_info->splitInstanceBindRegionCount != 0) { if (!(image_state->createInfo.flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT)) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01627", "%s: pNext of VkBindImageMemoryInfo contains VkBindImageMemoryDeviceGroupInfo with " "splitInstanceBindRegionCount (%" PRIi32 ") not equal to 0 and %s is not created with " "VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT.", error_prefix, bind_image_memory_device_group_info->splitInstanceBindRegionCount, report_data->FormatHandle(image_state->image()).c_str()); } uint32_t phy_dev_square = 1; if (device_group_create_info.physicalDeviceCount > 0) { phy_dev_square = device_group_create_info.physicalDeviceCount * device_group_create_info.physicalDeviceCount; } if (bind_image_memory_device_group_info->splitInstanceBindRegionCount != phy_dev_square) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryDeviceGroupInfo-splitInstanceBindRegionCount-01636", "%s: pNext of VkBindImageMemoryInfo contains VkBindImageMemoryDeviceGroupInfo with " "splitInstanceBindRegionCount (%" PRIi32 
") which is not 0 and different from the number of physical devices in the logical device squared (%" PRIu32 ").", error_prefix, bind_image_memory_device_group_info->splitInstanceBindRegionCount, phy_dev_square); } } if (plane_info) { // Checks for disjoint bit in image if (image_state->disjoint == false) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01618", "%s: pNext of VkBindImageMemoryInfo contains VkBindImagePlaneMemoryInfo and %s is not created with " "VK_IMAGE_CREATE_DISJOINT_BIT.", error_prefix, report_data->FormatHandle(image_state->image()).c_str()); } // Make sure planeAspect is only a single, valid plane uint32_t planes = FormatPlaneCount(image_state->createInfo.format); VkImageAspectFlags aspect = plane_info->planeAspect; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) { skip |= LogError( bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283", "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT" "or VK_IMAGE_ASPECT_PLANE_1_BIT.", error_prefix, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) { skip |= LogError( bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283", "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT" "or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.", error_prefix, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str()); } } } const auto bind_image_memory_device_group = LvlFindInChain<VkBindImageMemoryDeviceGroupInfo>(bind_info.pNext); if (bind_image_memory_device_group) { if (bind_image_memory_device_group->deviceIndexCount > 0 && bind_image_memory_device_group->splitInstanceBindRegionCount > 0) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryDeviceGroupInfo-deviceIndexCount-01633", "%s: VkBindImageMemoryDeviceGroupInfo in pNext of pBindInfos[%" PRIu32 "] has both deviceIndexCount and splitInstanceBindRegionCount greater than 0.", error_prefix, i); } } } // Check to make sure all disjoint planes were bound for (auto &resource : resources_bound) { const auto image_state = Get<IMAGE_STATE>(resource.first); if (image_state->disjoint == true) { uint32_t total_planes = FormatPlaneCount(image_state->createInfo.format); for (uint32_t i = 0; i < total_planes; i++) { if (resource.second[i] == UINT32_MAX) { skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858", "%s: Plane %u of the disjoint image was not bound. 
                skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858",
                                 "%s: Plane %u of the disjoint image was not bound. All %u planes need to be bound individually "
                                 "in separate pBindInfos in a single call.",
                                 api_name, i, total_planes);
            }
        }
    }
}

    return skip;
}

bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
                                                VkDeviceSize memoryOffset) const {
    bool skip = false;
    const auto image_state = Get<IMAGE_STATE>(image);
    if (image_state) {
        // Checks for no disjoint bit
        if (image_state->disjoint == true) {
            skip |= LogError(image, "VUID-vkBindImageMemory-image-01608",
                             "%s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT (need to use vkBindImageMemory2).",
                             report_data->FormatHandle(image).c_str());
        }
    }

    auto bind_info = LvlInitStruct<VkBindImageMemoryInfo>();
    bind_info.image = image;
    bind_info.memory = mem;
    bind_info.memoryOffset = memoryOffset;
    skip |= ValidateBindImageMemory(1, &bind_info, "vkBindImageMemory()");
    return skip;
}

bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
                                                 const VkBindImageMemoryInfo *pBindInfos) const {
    return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2()");
}

bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                    const VkBindImageMemoryInfo *pBindInfos) const {
    return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2KHR()");
}

bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const {
    bool skip = false;
    const auto event_state = Get<EVENT_STATE>(event);
    if (event_state) {
        if (event_state->write_in_use) {
            skip |= LogError(event, kVUID_Core_DrawState_QueueForwardProgress,
                             "vkSetEvent(): %s is already in use by a command buffer.",
                             report_data->FormatHandle(event).c_str());
        }
        if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
            skip |= LogError(event, "VUID-vkSetEvent-event-03941",
                             "vkSetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
                             report_data->FormatHandle(event).c_str());
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateResetEvent(VkDevice device, VkEvent event) const {
    bool skip = false;
    const auto event_state = Get<EVENT_STATE>(event);
    if (event_state) {
        if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
            skip |= LogError(event, "VUID-vkResetEvent-event-03823",
                             "vkResetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
                             report_data->FormatHandle(event).c_str());
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetEventStatus(VkDevice device, VkEvent event) const {
    bool skip = false;
    const auto event_state = Get<EVENT_STATE>(event);
    if (event_state) {
        if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
            skip |= LogError(event, "VUID-vkGetEventStatus-event-03940",
                             "vkGetEventStatus(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.",
                             report_data->FormatHandle(event).c_str());
        }
    }
    return skip;
}

bool CoreChecks::ValidateSparseMemoryBind(const VkSparseMemoryBind *bind, const char *func_name,
                                          const char *parameter_name) const {
    bool skip = false;
    if (bind) {
        const auto mem_info = Get<DEVICE_MEMORY_STATE>(bind->memory);
        if (mem_info) {
            if (phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
                VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
                skip |= LogError(bind->memory, "VUID-VkSparseMemoryBind-memory-01097",
                                 "%s: %s memory type has VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT bit set.", func_name,
                                 parameter_name);
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
                                                VkFence fence) const {
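    // Validated here, in order: the fence can be submitted, the queue supports sparse binding,
    // wait/signal semaphore state stays consistent across the batch, and each sparse bind's
    // memoryOffset and subresource fall within the bound memory object and image.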
    const auto queue_data = Get<QUEUE_STATE>(queue);
    const auto fence_state = Get<FENCE_STATE>(fence);
    bool skip = ValidateFenceForSubmit(fence_state.get(), "VUID-vkQueueBindSparse-fence-01114",
                                       "VUID-vkQueueBindSparse-fence-01113", "vkQueueBindSparse()");
    if (skip) {
        return true;
    }

    const auto queue_flags = physical_device_state->queue_family_properties[queue_data->queueFamilyIndex].queueFlags;
    if (!(queue_flags & VK_QUEUE_SPARSE_BINDING_BIT)) {
        skip |= LogError(queue, "VUID-vkQueueBindSparse-queuetype",
                         "vkQueueBindSparse(): queue was not created with VK_QUEUE_SPARSE_BINDING_BIT set "
                         "(not a memory-management capable queue).");
    }

    layer_data::unordered_set<VkSemaphore> signaled_semaphores;
    layer_data::unordered_set<VkSemaphore> unsignaled_semaphores;
    layer_data::unordered_set<VkSemaphore> internal_semaphores;
    auto *vuid_error = IsExtEnabled(device_extensions.vk_khr_timeline_semaphore) ? "VUID-vkQueueBindSparse-pWaitSemaphores-03245"
                                                                                 : kVUID_Core_DrawState_QueueForwardProgress;
    for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
        Location outer_loc(Func::vkQueueBindSparse, Struct::VkBindSparseInfo);
        const VkBindSparseInfo &bind_info = pBindInfo[bind_idx];

        auto timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info.pNext);
        for (uint32_t i = 0; i < bind_info.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bind_info.pWaitSemaphores[i];
            const auto semaphore_state = Get<SEMAPHORE_STATE>(semaphore);
            if (!semaphore_state) {
                continue;
            }
            switch (semaphore_state->type) {
                case VK_SEMAPHORE_TYPE_TIMELINE:
                    if (!timeline_semaphore_submit_info) {
                        skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
                                         "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but "
                                         "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                         bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
                    } else if (bind_info.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
                        skip |= LogError(
                            semaphore, "VUID-VkBindSparseInfo-pNext-03247",
                            "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains "
                            "an instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different "
                            "than pBindInfo[%u].waitSemaphoreCount (%u)",
                            bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
                            timeline_semaphore_submit_info->waitSemaphoreValueCount, bind_idx, bind_info.waitSemaphoreCount);
                    } else {
                        auto loc = outer_loc.dot(Field::pWaitSemaphoreValues, i);
                        skip |= ValidateMaxTimelineSemaphoreValueDifference(
                            loc, *semaphore_state, timeline_semaphore_submit_info->pWaitSemaphoreValues[i]);
                    }
                    break;
                case VK_SEMAPHORE_TYPE_BINARY:
                    if ((semaphore_state->Scope() == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                        if (unsignaled_semaphores.count(semaphore) ||
                            (!signaled_semaphores.count(semaphore) && !semaphore_state->CanBeWaited())) {
                            LogObjectList objlist(semaphore);
                            objlist.add(queue);
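                            // A binary wait with no prior signal, and nothing pending that could signal it,
                            // can never complete, so submitting it would stall the queue forever.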
                            skip |= LogError(objlist,
                                             semaphore_state->Scope() == kSyncScopeInternal
                                                 ? vuid_error
                                                 : kVUID_Core_DrawState_QueueForwardProgress,
                                             "vkQueueBindSparse(): Queue %s is waiting on pBindInfo[%u].pWaitSemaphores[%u] (%s) "
                                             "that has no way to be signaled.",
                                             report_data->FormatHandle(queue).c_str(), bind_idx, i,
                                             report_data->FormatHandle(semaphore).c_str());
                        } else {
                            signaled_semaphores.erase(semaphore);
                            unsignaled_semaphores.insert(semaphore);
                        }
                    } else if (semaphore_state->Scope() == kSyncScopeExternalTemporary) {
                        internal_semaphores.insert(semaphore);
                    }
                    break;
                default:
                    break;
            }
        }

        for (uint32_t i = 0; i < bind_info.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bind_info.pSignalSemaphores[i];
            const auto semaphore_state = Get<SEMAPHORE_STATE>(semaphore);
            if (!semaphore_state) {
                continue;
            }
            switch (semaphore_state->type) {
                case VK_SEMAPHORE_TYPE_TIMELINE:
                    if (!timeline_semaphore_submit_info) {
                        skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
                                         "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but "
                                         "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                         bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
                    } else if (bind_info.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
                        skip |= LogError(
                            semaphore, "VUID-VkBindSparseInfo-pNext-03248",
                            "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains "
                            "an instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different "
                            "than pBindInfo[%u].signalSemaphoreCount (%u)",
                            bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
                            timeline_semaphore_submit_info->signalSemaphoreValueCount, bind_idx, bind_info.signalSemaphoreCount);
                    } else if (timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= semaphore_state->Completed().payload) {
                        LogObjectList objlist(semaphore);
                        objlist.add(queue);
                        skip |= LogError(objlist, "VUID-VkBindSparseInfo-pSignalSemaphores-03249",
                                         "VkQueueBindSparse: signal value (0x%" PRIx64
                                         ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
                                         ") in pBindInfo[%u].pSignalSemaphores[%u]",
                                         timeline_semaphore_submit_info->pSignalSemaphoreValues[i],
                                         report_data->FormatHandle(queue).c_str(),
                                         report_data->FormatHandle(semaphore).c_str(), semaphore_state->Completed().payload,
                                         bind_idx, i);
                    } else {
                        auto loc = outer_loc.dot(Field::pSignalSemaphoreValues, i);
                        skip |= ValidateMaxTimelineSemaphoreValueDifference(
                            loc, *semaphore_state, timeline_semaphore_submit_info->pSignalSemaphoreValues[i]);
                    }
                    break;
                case VK_SEMAPHORE_TYPE_BINARY:
                    if (semaphore_state->Scope() == kSyncScopeInternal) {
                        if (signaled_semaphores.count(semaphore) ||
                            (!(unsignaled_semaphores.count(semaphore)) && !semaphore_state->CanBeSignaled())) {
                            auto last_op = semaphore_state->LastOp();
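                            // Identify the queue that performed the previous (still unconsumed) signal so the
                            // error message can point at it.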
                            VkQueue other_queue = last_op && last_op->queue ? last_op->queue->Queue() : VK_NULL_HANDLE;
                            LogObjectList objlist(semaphore);
                            objlist.add(queue);
                            objlist.add(other_queue);
                            skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
                                             "vkQueueBindSparse(): %s is signaling pBindInfo[%u].pSignalSemaphores[%u] (%s) that was "
                                             "previously signaled by %s but has not since been waited on by any queue.",
                                             report_data->FormatHandle(queue).c_str(), bind_idx, i,
                                             report_data->FormatHandle(semaphore).c_str(),
                                             report_data->FormatHandle(other_queue).c_str());
                        } else {
                            unsignaled_semaphores.erase(semaphore);
                            signaled_semaphores.insert(semaphore);
                        }
                    }
                    break;
                default:
                    break;
            }
        }

        if (bind_info.pBufferBinds) {
            for (uint32_t buffer_idx = 0; buffer_idx < bind_info.bufferBindCount; ++buffer_idx) {
                const VkSparseBufferMemoryBindInfo &buffer_bind = bind_info.pBufferBinds[buffer_idx];
                if (buffer_bind.pBinds) {
                    for (uint32_t buffer_bind_idx = 0; buffer_bind_idx < buffer_bind.bindCount; ++buffer_bind_idx) {
                        const VkSparseMemoryBind &memory_bind = buffer_bind.pBinds[buffer_bind_idx];
                        std::stringstream parameter_name;
                        parameter_name << "pBindInfo[" << bind_idx << "].pBufferBinds[" << buffer_idx << "].pBinds["
                                       << buffer_bind_idx << "]";
                        skip |= ValidateSparseMemoryBind(&memory_bind, "vkQueueBindSparse()", parameter_name.str().c_str());
                        const auto mem_info = Get<DEVICE_MEMORY_STATE>(memory_bind.memory);
                        if (mem_info) {
                            if (memory_bind.memoryOffset >= mem_info->alloc_info.allocationSize) {
                                skip |= LogError(buffer_bind.buffer, "VUID-VkSparseMemoryBind-memoryOffset-01101",
                                                 "vkQueueBindSparse(): pBindInfo[%u].pBufferBinds[%u]: memoryOffset is not less than "
                                                 "the size of memory",
                                                 bind_idx, buffer_idx);
                            }
                        }
                    }
                }
            }
        }

        if (bind_info.pImageOpaqueBinds) {
            for (uint32_t image_opaque_idx = 0; image_opaque_idx < bind_info.imageOpaqueBindCount; ++image_opaque_idx) {
                const VkSparseImageOpaqueMemoryBindInfo &image_opaque_bind = bind_info.pImageOpaqueBinds[image_opaque_idx];
                if (image_opaque_bind.pBinds) {
                    for (uint32_t image_opaque_bind_idx = 0; image_opaque_bind_idx < image_opaque_bind.bindCount;
                         ++image_opaque_bind_idx) {
                        const VkSparseMemoryBind &memory_bind = image_opaque_bind.pBinds[image_opaque_bind_idx];
                        std::stringstream parameter_name;
                        parameter_name << "pBindInfo[" << bind_idx << "].pImageOpaqueBinds[" << image_opaque_idx << "].pBinds["
                                       << image_opaque_bind_idx << "]";
                        skip |= ValidateSparseMemoryBind(&memory_bind, "vkQueueBindSparse()", parameter_name.str().c_str());
                        const auto mem_info = Get<DEVICE_MEMORY_STATE>(memory_bind.memory);
                        if (mem_info) {
                            if (memory_bind.memoryOffset >= mem_info->alloc_info.allocationSize) {
                                skip |= LogError(
                                    image_opaque_bind.image, "VUID-VkSparseMemoryBind-memoryOffset-01101",
                                    "vkQueueBindSparse(): pBindInfo[%u].pImageOpaqueBinds[%u]: memoryOffset is not less than "
                                    "the size of memory",
                                    bind_idx, image_opaque_idx);
                            }
                        }
                    }
                }
            }
        }

        if (bind_info.pImageBinds) {
            for (uint32_t image_idx = 0; image_idx < bind_info.imageBindCount; ++image_idx) {
                const VkSparseImageMemoryBindInfo &image_bind = bind_info.pImageBinds[image_idx];
                const auto image_state = Get<IMAGE_STATE>(image_bind.image);

                if (image_state && !(image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) {
                    skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-image-02901",
                                     "vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: image must have been created with "
                                     "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set",
                                     bind_idx, image_idx);
                }

                if (image_bind.pBinds) {
                    for (uint32_t image_bind_idx = 0; image_bind_idx < image_bind.bindCount; ++image_bind_idx) {
                        const VkSparseImageMemoryBind 
&memory_bind = image_bind.pBinds[image_bind_idx]; const auto mem_info = Get<DEVICE_MEMORY_STATE>(memory_bind.memory); if (mem_info) { if (memory_bind.memoryOffset >= mem_info->alloc_info.allocationSize) { skip |= LogError(image_bind.image, "VUID-VkSparseMemoryBind-memoryOffset-01101", "vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: memoryOffset is not less than " "the size of memory", bind_idx, image_idx); } } if (image_state) { if (memory_bind.subresource.mipLevel >= image_state->createInfo.mipLevels) { skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-subresource-01722", "vkQueueBindSparse(): pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32 "].subresource.mipLevel (%" PRIu32 ") is not less than mipLevels (%" PRIu32 ") of image pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32 "].image.", bind_idx, image_idx, memory_bind.subresource.mipLevel, image_state->createInfo.mipLevels, bind_idx, image_idx); } if (memory_bind.subresource.arrayLayer >= image_state->createInfo.arrayLayers) { skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-subresource-01723", "vkQueueBindSparse(): pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32 "].subresource.arrayLayer (%" PRIu32 ") is not less than arrayLayers (%" PRIu32 ") of image pBindInfo[%" PRIu32 "].pImageBinds[%" PRIu32 "].image.", bind_idx, image_idx, memory_bind.subresource.arrayLayer, image_state->createInfo.arrayLayers, bind_idx, image_idx); } } } } } } } return skip; } bool CoreChecks::ValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo, const char *api_name) const { bool skip = false; const auto semaphore_state = Get<SEMAPHORE_STATE>(pSignalInfo->semaphore); if (!semaphore_state) { return skip; } if (semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) { skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-semaphore-03257", "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type.", api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str()); return skip; } const auto completed = semaphore_state->Completed(); if (completed.payload >= pSignalInfo->value) { skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03258", "%s(): value (%" PRIu64 ") must be greater than current semaphore %s value (%" PRIu64 ").", api_name, pSignalInfo->value, report_data->FormatHandle(pSignalInfo->semaphore).c_str(), completed.payload); return skip; } else if (semaphore_state->HasPendingOps()) { // look back for the last signal operation, but there could be pending waits with higher payloads behind it. 
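        // Timeline payloads must be strictly increasing, so the host-side signal value must also stay
        // below the payload of any signal operation that is still pending on a queue.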
const auto last_op = semaphore_state->LastOp([](const SEMAPHORE_STATE::SemOp &op) { return op.IsSignal(); }); if (last_op && pSignalInfo->value >= last_op->payload) { skip |= LogError( pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03259", "%s(): value (%" PRIu64 ") must be less than value of any pending signal operation (%" PRIu64 ") for semaphore %s.", api_name, pSignalInfo->value, last_op->payload, report_data->FormatHandle(pSignalInfo->semaphore).c_str()); } } if (!skip) { Location loc(Func::vkSignalSemaphore, Struct::VkSemaphoreSignalInfo, Field::value); skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, *semaphore_state, pSignalInfo->value); } return skip; } bool CoreChecks::PreCallValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const { return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphore"); } bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const { return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphoreKHR"); } bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const { bool skip = false; const auto sema_node = Get<SEMAPHORE_STATE>(semaphore); if (sema_node) { skip |= ValidateObjectNotInUse(sema_node.get(), caller_name, kVUIDUndefined); } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const { return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR"); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const { return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR"); } bool CoreChecks::ValidateImportFence(VkFence fence, const char *vuid, const char *caller_name) const { const auto fence_node = Get<FENCE_STATE>(fence); bool skip = false; if (fence_node && fence_node->Scope() == kSyncScopeInternal && fence_node->State() == FENCE_INFLIGHT) { skip |= LogError(fence, vuid, "%s: Fence %s that is currently in use.", caller_name, report_data->FormatHandle(fence).c_str()); } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR( VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const { return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "VUID-vkImportFenceWin32HandleKHR-fence-04448", "vkImportFenceWin32HandleKHR()"); } #endif // VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const { return ValidateImportFence(pImportFenceFdInfo->fence, "VUID-vkImportFenceFdKHR-fence-01463", "vkImportFenceFdKHR()"); } static VkImageCreateInfo GetSwapchainImpliedImageCreateInfo(VkSwapchainCreateInfoKHR const *pCreateInfo) { auto result = LvlInitStruct<VkImageCreateInfo>(); if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) { result.flags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT; } if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) result.flags |= VK_IMAGE_CREATE_PROTECTED_BIT; if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) { result.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT; } 
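    // The remaining fields follow from swapchain image equivalence: swapchain images are always
    // single-sample, single-mip, optimally tiled 2D images.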
result.imageType = VK_IMAGE_TYPE_2D; result.format = pCreateInfo->imageFormat; result.extent.width = pCreateInfo->imageExtent.width; result.extent.height = pCreateInfo->imageExtent.height; result.extent.depth = 1; result.mipLevels = 1; result.arrayLayers = pCreateInfo->imageArrayLayers; result.samples = VK_SAMPLE_COUNT_1_BIT; result.tiling = VK_IMAGE_TILING_OPTIMAL; result.usage = pCreateInfo->imageUsage; result.sharingMode = pCreateInfo->imageSharingMode; result.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount; result.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices; result.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; return result; } bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo, const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const { // All physical devices and queue families are required to be able to present to any native window on Android; require the // application to have established support on any other platform. if (!instance_extensions.vk_khr_android_surface) { // restrict search only to queue families of VkDeviceQueueCreateInfos, not the whole physical device bool is_supported = AnyOf<QUEUE_STATE>([this, surface_state](const QUEUE_STATE &queue_state) { return surface_state->GetQueueSupport(physical_device, queue_state.queueFamilyIndex); }); if (!is_supported) { LogObjectList objs(device); objs.add(surface_state->Handle()); if (LogError(objs, "VUID-VkSwapchainCreateInfoKHR-surface-01270", "%s: pCreateInfo->surface is not supported for presentation by this device.", func_name)) { return true; } } } if (old_swapchain_state) { if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) { if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) { return true; } } if (old_swapchain_state->retired) { if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain is retired", func_name)) { return true; } } } if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689", "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height)) { return true; } } VkSurfaceCapabilitiesKHR capabilities{}; DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->PhysDev(), pCreateInfo->surface, &capabilities); bool skip = false; VkSurfaceTransformFlagBitsKHR current_transform = capabilities.currentTransform; if ((pCreateInfo->preTransform & current_transform) != pCreateInfo->preTransform) { skip |= LogPerformanceWarning(physical_device, kVUID_Core_Swapchain_PreTransform, "%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image " "content as part of the presentation operation.", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform), string_VkSurfaceTransformFlagBitsKHR(current_transform)); } const VkPresentModeKHR present_mode = pCreateInfo->presentMode; const bool shared_present_mode = (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == present_mode || VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == present_mode); // Validate pCreateInfo->minImageCount against 
VkSurfaceCapabilitiesKHR::{min|max}ImageCount: // Shared Present Mode must have a minImageCount of 1 if ((pCreateInfo->minImageCount < capabilities.minImageCount) && !shared_present_mode) { const char *vuid = IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) ? "VUID-VkSwapchainCreateInfoKHR-presentMode-02839" : "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271"; if (LogError(device, vuid, "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) { return true; } } if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) { return true; } } // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent: if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) || (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) || (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) || (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274", "%s called with imageExtent = (%d,%d), which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), " "maxImageExtent = (%d,%d).", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) { return true; } } // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedTransforms. if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) || !(pCreateInfo->preTransform & capabilities.supportedTransforms)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string error_string = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). 
Supported values are:\n", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform)); error_string += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedTransforms) { const char *new_str = string_VkSurfaceTransformFlagBitsKHR(static_cast<VkSurfaceTransformFlagBitsKHR>(1 << i)); sprintf(str, " %s\n", new_str); error_string += str; } } // Log the message that we've built up: if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", error_string.c_str())) return true; } // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) || !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string error_string = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha)); error_string += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedCompositeAlpha) { const char *new_str = string_VkCompositeAlphaFlagBitsKHR(static_cast<VkCompositeAlphaFlagBitsKHR>(1 << i)); sprintf(str, " %s\n", new_str); error_string += str; } } // Log the message that we've built up: if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", error_string.c_str())) return true; } // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers: if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275", "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) { return true; } } const VkImageUsageFlags image_usage = pCreateInfo->imageUsage; // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags: // Shared Present Mode uses different set of capabilities to check imageUsage support if ((image_usage != (image_usage & capabilities.supportedUsageFlags)) && !shared_present_mode) { const char *vuid = IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) ? "VUID-VkSwapchainCreateInfoKHR-presentMode-01427" : "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276"; if (LogError(device, vuid, "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). 
Supported flag bits are 0x%08x.", func_name, image_usage, capabilities.supportedUsageFlags)) { return true; } } if (IsExtEnabled(device_extensions.vk_khr_surface_protected_capabilities) && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) { VkPhysicalDeviceSurfaceInfo2KHR surface_info = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR}; surface_info.surface = pCreateInfo->surface; VkSurfaceProtectedCapabilitiesKHR surface_protected_capabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR}; VkSurfaceCapabilities2KHR surface_capabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR}; surface_capabilities.pNext = &surface_protected_capabilities; DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->PhysDev(), &surface_info, &surface_capabilities); if (!surface_protected_capabilities.supportsProtected) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03187", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface " "capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.", func_name)) { return true; } } } // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR(): { // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format: bool found_format = false; bool found_color_space = false; bool found_match = false; const auto formats = surface_state->GetFormats(physical_device); for (const auto &format : formats) { if (pCreateInfo->imageFormat == format.format) { // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace: found_format = true; if (pCreateInfo->imageColorSpace == format.colorSpace) { found_match = true; break; } } else { if (pCreateInfo->imageColorSpace == format.colorSpace) { found_color_space = true; } } } if (!found_match) { if (!found_format) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageFormat (%s).", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } if (!found_color_space) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageColorSpace (%s).", func_name, string_VkColorSpaceKHR(pCreateInfo->imageColorSpace))) { return true; } } } } // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR(): auto present_modes = surface_state->GetPresentModes(physical_device); bool found_match = std::find(present_modes.begin(), present_modes.end(), present_mode) != present_modes.end(); if (!found_match) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-presentMode-01281", "%s called with a non-supported presentMode (i.e. %s).", func_name, string_VkPresentModeKHR(present_mode))) { return true; } } // Validate state for shared presentable case if (shared_present_mode) { if (!IsExtEnabled(device_extensions.vk_khr_shared_presentable_image)) { if (LogError( device, kVUID_Core_DrawState_ExtensionNotEnabled, "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not " "been enabled.", func_name, string_VkPresentModeKHR(present_mode))) { return true; } } else if (pCreateInfo->minImageCount != 1) { if (LogError( device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383", "%s called with presentMode %s, but minImageCount value is %d. 
For shared presentable image, minImageCount "
                    "must be 1.",
                    func_name, string_VkPresentModeKHR(present_mode), pCreateInfo->minImageCount)) {
                return true;
            }
        }
        VkSharedPresentSurfaceCapabilitiesKHR shared_present_capabilities = LvlInitStruct<VkSharedPresentSurfaceCapabilitiesKHR>();
        VkSurfaceCapabilities2KHR capabilities2 = LvlInitStruct<VkSurfaceCapabilities2KHR>(&shared_present_capabilities);
        VkPhysicalDeviceSurfaceInfo2KHR surface_info = LvlInitStruct<VkPhysicalDeviceSurfaceInfo2KHR>();
        surface_info.surface = pCreateInfo->surface;
        DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->PhysDev(), &surface_info, &capabilities2);

        if (image_usage != (image_usage & shared_present_capabilities.sharedPresentSupportedUsageFlags)) {
            if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageUsage-01384",
                         "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits for %s "
                         "present mode are 0x%08x.",
                         func_name, image_usage, string_VkPresentModeKHR(pCreateInfo->presentMode),
                         shared_present_capabilities.sharedPresentSupportedUsageFlags)) {
                return true;
            }
        }
    }

    if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
        bool skip1 = ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
                                                         func_name, "pCreateInfo->pQueueFamilyIndices",
                                                         "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428");
        if (skip1) return true;
    }

    // Validate pCreateInfo->imageUsage against GetPhysicalDeviceFormatProperties
    const VkFormatProperties3KHR format_properties = GetPDFormatProperties(pCreateInfo->imageFormat);
    const VkFormatFeatureFlags2KHR tiling_features = format_properties.optimalTilingFeatures;

    if (tiling_features == 0) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL has no supported format features on this "
                     "physical device.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_IMAGE_USAGE_SAMPLED_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_IMAGE_USAGE_STORAGE_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
               !(tiling_features & VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
               !(tiling_features & VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling 
VK_IMAGE_TILING_OPTIMAL does not support usage that includes " "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } else if ((image_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) && !(tiling_features & (VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR | VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR))) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } const VkImageCreateInfo image_create_info = GetSwapchainImpliedImageCreateInfo(pCreateInfo); VkImageFormatProperties image_properties = {}; const VkResult image_properties_result = DispatchGetPhysicalDeviceImageFormatProperties( physical_device, image_create_info.format, image_create_info.imageType, image_create_info.tiling, image_create_info.usage, image_create_info.flags, &image_properties); if (image_properties_result != VK_SUCCESS) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "vkGetPhysicalDeviceImageFormatProperties() unexpectedly failed, " "when called for %s validation with following params: " "format: %s, imageType: %s, " "tiling: %s, usage: %s, " "flags: %s.", func_name, string_VkFormat(image_create_info.format), string_VkImageType(image_create_info.imageType), string_VkImageTiling(image_create_info.tiling), string_VkImageUsageFlags(image_create_info.usage).c_str(), string_VkImageCreateFlags(image_create_info.flags).c_str())) { return true; } } // Validate pCreateInfo->imageArrayLayers against VkImageFormatProperties::maxArrayLayers if (pCreateInfo->imageArrayLayers > image_properties.maxArrayLayers) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s called with a non-supported imageArrayLayers (i.e. %d). 
" "Maximum value returned by vkGetPhysicalDeviceImageFormatProperties() is %d " "for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL", func_name, pCreateInfo->imageArrayLayers, image_properties.maxArrayLayers, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } // Validate pCreateInfo->imageExtent against VkImageFormatProperties::maxExtent if ((pCreateInfo->imageExtent.width > image_properties.maxExtent.width) || (pCreateInfo->imageExtent.height > image_properties.maxExtent.height)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778", "%s called with imageExtent = (%d,%d), which is bigger than max extent (%d,%d)" "returned by vkGetPhysicalDeviceImageFormatProperties(): " "for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, image_properties.maxExtent.width, image_properties.maxExtent.height, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } if ((pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) && device_group_create_info.physicalDeviceCount == 1) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-physicalDeviceCount-01429", "%s called with flags containing VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR" "but logical device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount equal to 1", func_name)) { return true; } } return skip; } bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const { const auto surface_state = Get<SURFACE_STATE>(pCreateInfo->surface); const auto old_swapchain_state = Get<SWAPCHAIN_NODE>(pCreateInfo->oldSwapchain); return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state.get(), old_swapchain_state.get()); } void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { if (swapchain) { auto swapchain_data = Get<SWAPCHAIN_NODE>(swapchain); if (swapchain_data) { for (const auto &swapchain_image : swapchain_data->images) { if (!swapchain_image.image_state) continue; imageLayoutMap.erase(swapchain_image.image_state); qfo_release_image_barrier_map.erase(swapchain_image.image_state->image()); } } } StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator); } void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages, VkResult result) { // This function will run twice. The first is to get pSwapchainImageCount. The second is to get pSwapchainImages. // The first time in StateTracker::PostCallRecordGetSwapchainImagesKHR only generates the container's size. // The second time in StateTracker::PostCallRecordGetSwapchainImagesKHR will create VKImage and IMAGE_STATE. // So GlobalImageLayoutMap saving new IMAGE_STATEs has to run in the second time. // pSwapchainImages is not nullptr and it needs to wait until StateTracker::PostCallRecordGetSwapchainImagesKHR. 
    uint32_t new_swapchain_image_index = 0;
    if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
        auto swapchain_state = Get<SWAPCHAIN_NODE>(swapchain);
        const auto image_vector_size = swapchain_state->images.size();
        for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
            if ((new_swapchain_image_index >= image_vector_size) ||
                !swapchain_state->images[new_swapchain_image_index].image_state) {
                break;
            }
        }
    }
    StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);

    if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
        for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
            auto image_state = Get<IMAGE_STATE>(pSwapchainImages[new_swapchain_image_index]);
            AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
        }
    }
}
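// Summary of the vkQueuePresentKHR checks that follow: wait semaphores must be binary semaphores with a pending
// signal, each presented image must actually have been acquired from its swapchain, and presented images must be in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR (or VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR when shared presentable images are enabled).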
bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
    bool skip = false;
    const auto queue_state = Get<QUEUE_STATE>(queue);

    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        const auto semaphore_state = Get<SEMAPHORE_STATE>(pPresentInfo->pWaitSemaphores[i]);
        if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
            skip |= LogError(pPresentInfo->pWaitSemaphores[i], "VUID-vkQueuePresentKHR-pWaitSemaphores-03267",
                             "vkQueuePresentKHR: pWaitSemaphores[%u] (%s) is not a VK_SEMAPHORE_TYPE_BINARY", i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
        if (semaphore_state && !semaphore_state->CanBeWaited()) {
            LogObjectList objlist(queue);
            objlist.add(pPresentInfo->pWaitSemaphores[i]);
            skip |= LogError(objlist, "VUID-vkQueuePresentKHR-pWaitSemaphores-03268",
                             "vkQueuePresentKHR: Queue %s is waiting on pWaitSemaphores[%u] (%s) that has no way to be signaled.",
                             report_data->FormatHandle(queue).c_str(), i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        const auto swapchain_data = Get<SWAPCHAIN_NODE>(pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            // VU currently is 2-in-1, covers being a valid index and valid layout
            const char *validation_error = IsExtEnabled(device_extensions.vk_khr_shared_presentable_image)
                                               ? "VUID-VkPresentInfoKHR-pImageIndices-01430"
                                               : "VUID-VkPresentInfoKHR-pImageIndices-01296";
            // Check if index is even possible to be acquired to give better error message
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |= LogError(pPresentInfo->pSwapchains[i], validation_error,
                                 "vkQueuePresentKHR: pSwapchains[%u] image index is too large (%u). There are only %u images in "
                                 "this swapchain.",
                                 i, pPresentInfo->pImageIndices[i], static_cast<uint32_t>(swapchain_data->images.size()));
            } else if (!swapchain_data->images[pPresentInfo->pImageIndices[i]].image_state ||
                       !swapchain_data->images[pPresentInfo->pImageIndices[i]].acquired) {
                skip |= LogError(pPresentInfo->pSwapchains[i], validation_error,
                                 "vkQueuePresentKHR: pSwapchains[%" PRIu32 "] image at index %" PRIu32
                                 " was not acquired from the swapchain.",
                                 i, pPresentInfo->pImageIndices[i]);
            } else {
                const auto *image_state = swapchain_data->images[pPresentInfo->pImageIndices[i]].image_state;
                assert(image_state);
                vector<VkImageLayout> layouts;
                if (FindLayouts(*image_state, layouts)) {
                    for (auto layout : layouts) {
                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
                            (!IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) ||
                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
                            skip |= LogError(queue, validation_error,
                                             "vkQueuePresentKHR(): pSwapchains[%u] images passed to present must be in layout "
                                             "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
                                             "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
                                             i, string_VkImageLayout(layout));
                        }
                    }
                }
                const auto *display_present_info = LvlFindInChain<VkDisplayPresentInfoKHR>(pPresentInfo->pNext);
                if (display_present_info) {
                    if (display_present_info->srcRect.offset.x < 0 || display_present_info->srcRect.offset.y < 0 ||
                        display_present_info->srcRect.offset.x + display_present_info->srcRect.extent.width >
                            image_state->createInfo.extent.width ||
                        display_present_info->srcRect.offset.y + display_present_info->srcRect.extent.height >
                            image_state->createInfo.extent.height) {
                        skip |= LogError(queue, "VUID-VkDisplayPresentInfoKHR-srcRect-01257",
                                         "vkQueuePresentKHR(): VkDisplayPresentInfoKHR::srcRect (offset (%" PRIu32 ", %" PRIu32
                                         "), extent (%" PRIu32 ", %" PRIu32
                                         ")) in the pNext chain of VkPresentInfoKHR is not a subset of the image being presented "
                                         "(extent (%" PRIu32 ", %" PRIu32 ")).",
                                         display_present_info->srcRect.offset.x, display_present_info->srcRect.offset.y,
                                         display_present_info->srcRect.extent.width, display_present_info->srcRect.extent.height,
                                         image_state->createInfo.extent.width, image_state->createInfo.extent.height);
                    }
                }
            }

            // All physical devices and queue families are required to be able to present to any native window on Android
            if (!instance_extensions.vk_khr_android_surface) {
                const auto surface_state = Get<SURFACE_STATE>(swapchain_data->createInfo.surface);
                if (!surface_state->GetQueueSupport(physical_device, queue_state->queueFamilyIndex)) {
                    skip |= LogError(
                        pPresentInfo->pSwapchains[i], "VUID-vkQueuePresentKHR-pSwapchains-01292",
                        "vkQueuePresentKHR: Presenting pSwapchains[%u] image on queue that cannot present to this surface.", i);
                }
            }
        }
    }
    if (pPresentInfo->pNext) {
        // Verify ext struct
        const auto *present_regions = LvlFindInChain<VkPresentRegionsKHR>(pPresentInfo->pNext);
        if (present_regions) {
            for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                const auto swapchain_data = Get<SWAPCHAIN_NODE>(pPresentInfo->pSwapchains[i]);
                assert(swapchain_data);
                VkPresentRegionKHR region = present_regions->pRegions[i];
                for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                    VkRectLayerKHR rect = region.pRectangles[j];
                    // Swap offsets and extents for 90 or 270 degree preTransform rotation
                    if (swapchain_data->createInfo.preTransform &
                        (VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR | VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
                         VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
                         VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR)) {
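                        // For these rotated preTransform values the presentation engine exchanges the width/height
                        // axes, so swap the rectangle's x/y and width/height here so the bounds checks below compare
                        // against the matching imageExtent dimensions.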
std::swap(rect.offset.x, rect.offset.y); std::swap(rect.extent.width, rect.extent.height); } if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) { skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], " "the sum of offset.x (%i) and extent.width (%i) after applying preTransform (%s) is greater " "than the corresponding swapchain's imageExtent.width (%i).", i, j, rect.offset.x, rect.extent.width, string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform), swapchain_data->createInfo.imageExtent.width); } if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) { skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], " "the sum of offset.y (%i) and extent.height (%i) after applying preTransform (%s) is greater " "than the corresponding swapchain's imageExtent.height (%i).", i, j, rect.offset.y, rect.extent.height, string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform), swapchain_data->createInfo.imageExtent.height); } if (rect.layer > swapchain_data->createInfo.imageArrayLayers) { skip |= LogError( pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-layer-01262", "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer " "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).", i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers); } } } } const auto *present_times_info = LvlFindInChain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext); if (present_times_info) { if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) { skip |= LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247", "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount " "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, " "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.", present_times_info->swapchainCount, pPresentInfo->swapchainCount); } } const auto *present_id_info = LvlFindInChain<VkPresentIdKHR>(pPresentInfo->pNext); if (present_id_info) { if (!enabled_features.present_id_features.presentId) { for (uint32_t i = 0; i < present_id_info->swapchainCount; i++) { if (present_id_info->pPresentIds[i] != 0) { skip |= LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentInfoKHR-pNext-06235", "vkQueuePresentKHR(): presentId feature is not enabled and VkPresentIdKHR::pPresentId[%" PRIu32 "] = %" PRIu64 " when only NULL values are allowed", i, present_id_info->pPresentIds[i]); } } } if (pPresentInfo->swapchainCount != present_id_info->swapchainCount) { skip |= LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentIdKHR-swapchainCount-04998", "vkQueuePresentKHR(): VkPresentIdKHR.swapchainCount is %" PRIu32 " but pPresentInfo->swapchainCount is %" PRIu32 ". 
VkPresentIdKHR.swapchainCount must be the same value as VkPresentInfoKHR::swapchainCount", present_id_info->swapchainCount, pPresentInfo->swapchainCount); } for (uint32_t i = 0; i < present_id_info->swapchainCount; i++) { const auto swapchain_state = Get<SWAPCHAIN_NODE>(pPresentInfo->pSwapchains[i]); if ((present_id_info->pPresentIds[i] != 0) && (present_id_info->pPresentIds[i] <= swapchain_state->max_present_id)) { skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkPresentIdKHR-presentIds-04999", "vkQueuePresentKHR(): VkPresentIdKHR.pPresentId[%" PRIu32 "] is %" PRIu64 " and the largest presentId sent for this swapchain is %" PRIu64 ". Each presentIds entry must be greater than any previous presentIds entry passed for the " "associated pSwapchains entry", i, present_id_info->pPresentIds[i], swapchain_state->max_present_id); } } } } return skip; } bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) const { bool skip = false; if (pCreateInfos) { for (uint32_t i = 0; i < swapchainCount; i++) { const auto surface_state = Get<SURFACE_STATE>(pCreateInfos[i].surface); const auto old_swapchain_state = Get<SWAPCHAIN_NODE>(pCreateInfos[i].oldSwapchain); std::stringstream func_name; func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()"; skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state.get(), old_swapchain_state.get()); } } return skip; } bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const AcquireVersion version, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name, const char *semaphore_type_vuid) const { bool skip = false; auto semaphore_state = Get<SEMAPHORE_STATE>(semaphore); if (semaphore_state) { if (semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) { skip |= LogError(semaphore, semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY", func_name, report_data->FormatHandle(semaphore).c_str()); } else if (semaphore_state->Scope() == kSyncScopeInternal) { auto last_op = semaphore_state->LastOp(); // TODO: VUIDs 01779 and 01781 cover the case where there are pending wait or signal operations on the // semaphore. But we don't currently have a good enough way to track when acquire & present operations // are completed. So it is possible to get in a condition where the semaphore is doing // acquire / wait / acquire and the first acquire (and thus the wait) have completed, but our state // isn't aware of it yet. This results in MANY false positives. if (!last_op && !semaphore_state->Completed().CanBeSignaled()) { const char *vuid = version == ACQUIRE_VERSION_2 ? "VUID-VkAcquireNextImageInfoKHR-semaphore-01288" : "VUID-vkAcquireNextImageKHR-semaphore-01286"; skip |= LogError(semaphore, vuid, "%s: Semaphore must not be currently signaled.", func_name); } } } auto fence_state = Get<FENCE_STATE>(fence); if (fence_state) { skip |= ValidateFenceForSubmit(fence_state.get(), "VUID-vkAcquireNextImageKHR-fence-01287", "VUID-vkAcquireNextImageKHR-fence-01287", "vkAcquireNextImageKHR()"); } const auto swapchain_data = Get<SWAPCHAIN_NODE>(swapchain); if (swapchain_data) { if (swapchain_data->retired) { const char *vuid = version == ACQUIRE_VERSION_2 ? 
"VUID-VkAcquireNextImageInfoKHR-swapchain-01675" : "VUID-vkAcquireNextImageKHR-swapchain-01285"; skip |= LogError(swapchain, vuid, "%s: This swapchain has been retired. The application can still present any images it " "has acquired, but cannot acquire any more.", func_name); } const uint32_t acquired_images = swapchain_data->acquired_images; const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size()); auto caps = swapchain_data->surface->GetCapabilities(physical_device); const auto min_image_count = caps.minImageCount; const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count; if (timeout == UINT64_MAX && too_many_already_acquired) { const char *vuid = version == ACQUIRE_VERSION_2 ? "VUID-vkAcquireNextImage2KHR-swapchain-01803" : "VUID-vkAcquireNextImageKHR-swapchain-01802"; const uint32_t acquirable = swapchain_image_count - min_image_count + 1; skip |= LogError(swapchain, vuid, "%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32 " %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32 ", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").", func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable, acquirable > 1 ? "are" : "is", swapchain_image_count, min_image_count); } } return skip; } bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const { return ValidateAcquireNextImage(device, ACQUIRE_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex, "vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265"); } bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) const { bool skip = false; skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, pAcquireInfo->swapchain, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290"); skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, pAcquireInfo->swapchain, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291"); skip |= ValidateAcquireNextImage(device, ACQUIRE_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR", "VUID-VkAcquireNextImageInfoKHR-semaphore-03266"); return skip; } bool CoreChecks::PreCallValidateWaitForPresentKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout) const { bool skip = false; if (!enabled_features.present_wait_features.presentWait) { skip |= LogError(swapchain, "VUID-vkWaitForPresentKHR-presentWait-06234", "vkWaitForPresentKHR(): VkWaitForPresent called but presentWait feature is not enabled"); } const auto swapchain_state = Get<SWAPCHAIN_NODE>(swapchain); if (swapchain_state) { if (swapchain_state->retired) { skip |= LogError(swapchain, "VUID-vkWaitForPresentKHR-swapchain-04997", "vkWaitForPresentKHR() called with a retired swapchain."); } } return skip; } bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) const { const auto surface_state = Get<SURFACE_STATE>(surface); bool skip = false; if ((surface_state) && (surface_state->swapchain)) { skip |= LogError(instance, "VUID-vkDestroySurfaceKHR-surface-01266", "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed."); } return 
skip; } #ifdef VK_USE_PLATFORM_WAYLAND_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display *display) const { const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physicalDevice); return ValidateQueueFamilyIndex(pd_state.get(), queueFamilyIndex, "VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306", "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WAYLAND_KHR #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) const { const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physicalDevice); return ValidateQueueFamilyIndex(pd_state.get(), queueFamilyIndex, "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309", "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WIN32_KHR #ifdef VK_USE_PLATFORM_XCB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t *connection, xcb_visualid_t visual_id) const { const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physicalDevice); return ValidateQueueFamilyIndex(pd_state.get(), queueFamilyIndex, "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312", "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XCB_KHR #ifdef VK_USE_PLATFORM_XLIB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display *dpy, VisualID visualID) const { const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physicalDevice); return ValidateQueueFamilyIndex(pd_state.get(), queueFamilyIndex, "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315", "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XLIB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported) const { const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physicalDevice); return ValidateQueueFamilyIndex(pd_state.get(), queueFamilyIndex, "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269", "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex"); } bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo) const { bool skip = false; const auto layout = Get<cvdescriptorset::DescriptorSetLayout>(pCreateInfo->descriptorSetLayout); if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) { skip |= LogError(pCreateInfo->descriptorSetLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350", "%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name, report_data->FormatHandle(pCreateInfo->descriptorSetLayout).c_str()); } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) { auto bind_point = pCreateInfo->pipelineBindPoint; bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) || (bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR); if (!valid_bp) { skip |= LogError(device, 
"VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351", "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point)); } const auto pipeline_layout = Get<PIPELINE_LAYOUT_STATE>(pCreateInfo->pipelineLayout); if (!pipeline_layout) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352", "%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str()); } else { const uint32_t pd_set = pCreateInfo->set; if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] || !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353", "%s: pCreateInfo->set (%" PRIu32 ") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).", func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str()); } } } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType) { for (const auto &binding : layout->GetBindings()) { if (binding.descriptorType == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE) { skip |= LogError( device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-04615", "%s: pCreateInfo->templateType is VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, but " "pCreateInfo->descriptorSetLayout contains a binding with descriptor type VK_DESCRIPTOR_TYPE_MUTABLE_VALVE.", func_name); } } } for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) { const auto &descriptor_update = pCreateInfo->pDescriptorUpdateEntries[i]; if (descriptor_update.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if (descriptor_update.dstArrayElement & 3) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateEntry-descriptor-02226", "%s: pCreateInfo->pDescriptorUpdateEntries[%" PRIu32 "] has descriptorType VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, but dstArrayElement (%" PRIu32 ") is not a " "multiple of 4).", func_name, i, descriptor_update.dstArrayElement); } if (descriptor_update.descriptorCount & 3) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateEntry-descriptor-02227", "%s: pCreateInfo->pDescriptorUpdateEntries[%" PRIu32 "] has descriptorType VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, but descriptorCount (%" PRIu32 ")is not a " "multiple of 4).", func_name, i, descriptor_update.descriptorCount); } } } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo); return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo); return skip; } bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) const { bool skip = false; const auto template_state = 
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
                                                         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                         const void *pData) const {
    bool skip = false;
    const auto template_state = Get<UPDATE_TEMPLATE_STATE>(descriptorUpdateTemplate);
    // Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
    // but retaining the assert as template support is new enough to want to investigate these in debug builds.
    assert(template_state);
    // TODO: Validate template push descriptor updates
    if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
        skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state.get(), pData);
    }
    return skip;
}

bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
                                                                VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                                const void *pData) const {
    return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}

bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                                   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                                   const void *pData) const {
    return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}

bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                                    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                                    VkPipelineLayout layout, uint32_t set,
                                                                    const void *pData) const {
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    const char *const func_name = "vkCmdPushDescriptorSetWithTemplateKHR()";
    bool skip = false;
    skip |= ValidateCmd(cb_state.get(), CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR);

    const auto layout_data = Get<PIPELINE_LAYOUT_STATE>(layout);
    const auto dsl = layout_data ? layout_data->GetDsl(set) : nullptr;
    // Validate the set index points to a push descriptor set and is in range
    if (dsl) {
        if (!dsl->IsPushDescriptor()) {
            skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
                             "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set,
                             report_data->FormatHandle(layout).c_str());
        }
    } else if (layout_data && (set >= layout_data->set_layouts.size())) {
        skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
                         "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
                         report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size()));
    }

    const auto template_state = Get<UPDATE_TEMPLATE_STATE>(descriptorUpdateTemplate);
    if (template_state) {
        const auto &template_ci = template_state->create_info;
        static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
            std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
            std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
            std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR,
                           "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
        skip |= ValidatePipelineBindPoint(cb_state.get(), template_ci.pipelineBindPoint, func_name, bind_errors);

        if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
            skip |= LogError(cb_state->commandBuffer(), kVUID_Core_PushDescriptorUpdate_TemplateType,
                             "%s: descriptorUpdateTemplate %s was not created with flag "
                             "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
                             func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str());
        }
        if (template_ci.set != set) {
            skip |= LogError(cb_state->commandBuffer(), 
kVUID_Core_PushDescriptorUpdate_Template_SetMismatched, "%s: descriptorUpdateTemplate %s created with set %" PRIu32 " does not match command parameter set %" PRIu32 ".", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set); } const auto template_layout = Get<PIPELINE_LAYOUT_STATE>(template_ci.pipelineLayout); if (!CompatForSet(set, layout_data.get(), template_layout.get())) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(descriptorUpdateTemplate); objlist.add(template_ci.pipelineLayout); objlist.add(layout); skip |= LogError(objlist, kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched, "%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter " "%s for set %" PRIu32, func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), report_data->FormatHandle(template_ci.pipelineLayout).c_str(), report_data->FormatHandle(layout).c_str(), set); } } if (dsl && template_state) { // Create an empty proxy in order to use the existing descriptor set update validation cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this); // Decode the template into a set of write updates cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state.get(), pData, dsl->GetDescriptorSetLayout()); // Validate the decoded update against the proxy_ds skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()), decoded_template.desc_writes.data(), func_name); } return skip; } bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex, const char *api_name) const { bool skip = false; const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physicalDevice); if (pd_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) { if (planeIndex >= pd_state->display_plane_property_count) { skip |= LogError(physicalDevice, "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249", "%s(): planeIndex (%u) must be in the range [0, %d] that was returned by " "vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. 
Do you have the plane index hardcoded?", api_name, planeIndex, pd_state->display_plane_property_count - 1); } } return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneSupportedDisplaysKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR"); return skip; } bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR *pCapabilities) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex, "vkGetDisplayPlaneCapabilities2KHR"); return skip; } bool CoreChecks::PreCallValidateCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) const { bool skip = false; const VkDisplayModeKHR display_mode = pCreateInfo->displayMode; const uint32_t plane_index = pCreateInfo->planeIndex; if (pCreateInfo->alphaMode == VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR) { const float global_alpha = pCreateInfo->globalAlpha; if ((global_alpha > 1.0f) || (global_alpha < 0.0f)) { skip |= LogError( display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01254", "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR but globalAlpha is %f.", global_alpha); } } const auto dm_state = Get<DISPLAY_MODE_STATE>(display_mode); if (dm_state != nullptr) { // Get physical device from VkDisplayModeKHR state tracking const VkPhysicalDevice physical_device = dm_state->physical_device; const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physical_device); VkPhysicalDeviceProperties device_properties = {}; DispatchGetPhysicalDeviceProperties(physical_device, &device_properties); const uint32_t width = pCreateInfo->imageExtent.width; const uint32_t height = pCreateInfo->imageExtent.height; if (width >= device_properties.limits.maxImageDimension2D) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256", "vkCreateDisplayPlaneSurfaceKHR(): width (%" PRIu32 ") exceeds device limit maxImageDimension2D (%" PRIu32 ").", width, device_properties.limits.maxImageDimension2D); } if (height >= device_properties.limits.maxImageDimension2D) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256", "vkCreateDisplayPlaneSurfaceKHR(): height (%" PRIu32 ") exceeds device limit maxImageDimension2D (%" PRIu32 ").", height, device_properties.limits.maxImageDimension2D); } if (pd_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) { if (plane_index >= pd_state->display_plane_property_count) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-planeIndex-01252", "vkCreateDisplayPlaneSurfaceKHR(): planeIndex (%u) must be in the range [0, %d] that was returned by " "vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. 
Do you have the plane index hardcoded?", plane_index, pd_state->display_plane_property_count - 1); } else { // call here once we know the plane index used is a valid plane index VkDisplayPlaneCapabilitiesKHR plane_capabilities; DispatchGetDisplayPlaneCapabilitiesKHR(physical_device, display_mode, plane_index, &plane_capabilities); if ((pCreateInfo->alphaMode & plane_capabilities.supportedAlpha) == 0) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01255", "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is %s but planeIndex %u supportedAlpha (0x%x) " "does not support the mode.", string_VkDisplayPlaneAlphaFlagBitsKHR(pCreateInfo->alphaMode), plane_index, plane_capabilities.supportedAlpha); } } } } return skip; } bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); return ValidateCmd(cb_state.get(), CMD_DEBUGMARKERBEGINEXT); } bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); return ValidateCmd(cb_state.get(), CMD_DEBUGMARKERENDEXT); } bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) const { if (disabled[query_validation]) return false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); QueryObject query_obj(queryPool, query, index); const char *cmd_name = "vkCmdBeginQueryIndexedEXT()"; struct BeginQueryIndexedVuids : ValidateBeginQueryVuids { BeginQueryIndexedVuids() : ValidateBeginQueryVuids() { vuid_queue_flags = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool"; vuid_queue_feedback = "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338"; vuid_queue_occlusion = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803"; vuid_precise = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00800"; vuid_query_count = "VUID-vkCmdBeginQueryIndexedEXT-query-00802"; vuid_profile_lock = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223"; vuid_scope_not_first = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224"; vuid_scope_in_rp = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225"; vuid_dup_query_type = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-04753"; vuid_protected_cb = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-01885"; } }; BeginQueryIndexedVuids vuids; bool skip = ValidateBeginQuery(cb_state.get(), query_obj, flags, index, CMD_BEGINQUERYINDEXEDEXT, &vuids); // Extension specific VU's auto query_pool_state = Get<QUERY_POOL_STATE>(query_obj.pool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) { if (IsExtEnabled(device_extensions.vk_ext_transform_feedback) && (index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) { skip |= LogError( cb_state->commandBuffer(), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339", "%s: index %" PRIu32 " must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".", cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams); } } else if (index != 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340", "%s: index %" PRIu32 " must be zero if %s was not created with type 
VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
                         cmd_name, index, report_data->FormatHandle(queryPool).c_str());
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                                                      VkQueryControlFlags flags, uint32_t index) {
    if (disabled[query_validation]) return;
    QueryObject query_obj = {queryPool, query, index};
    EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQueryIndexedEXT()");
}

void CoreChecks::PreCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                                                    uint32_t index) {
    if (disabled[query_validation]) return;
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    QueryObject query_obj = {queryPool, query, index};
    query_obj.endCommandIndex = cb_state->commandCount - 1;
    EnqueueVerifyEndQuery(commandBuffer, query_obj);
}

bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                                                      uint32_t index) const {
    if (disabled[query_validation]) return false;
    QueryObject query_obj = {queryPool, query, index};
    const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer);
    assert(cb_state);
    struct EndQueryIndexedVuids : ValidateEndQueryVuids {
        EndQueryIndexedVuids() : ValidateEndQueryVuids() {
            vuid_queue_flags = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool";
            vuid_active_queries = "VUID-vkCmdEndQueryIndexedEXT-None-02342";
            vuid_protected_cb = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-02344";
        }
    };
    EndQueryIndexedVuids vuids;
    bool skip = false;
    skip |= ValidateCmdEndQuery(cb_state.get(), query_obj, index, CMD_ENDQUERYINDEXEDEXT, &vuids);

    const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool);
    if (query_pool_state) {
        const auto &query_pool_ci = query_pool_state->createInfo;
        const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
        if (query >= available_query_count) {
            skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQueryIndexedEXT-query-02343",
                             "vkCmdEndQueryIndexedEXT(): query index (%" PRIu32
                             ") is greater than or equal to the queryPool size (%" PRIu32 ").",
                             query, available_query_count);
        }
        if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
            if (IsExtEnabled(device_extensions.vk_ext_transform_feedback) &&
                (index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
                skip |= LogError(
                    cb_state->commandBuffer(), "VUID-vkCmdEndQueryIndexedEXT-queryType-02346",
                    "vkCmdEndQueryIndexedEXT(): index %" PRIu32
                    " must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
                    index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
            }
        } else if (index != 0) {
            skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQueryIndexedEXT-queryType-02347",
                             "vkCmdEndQueryIndexedEXT(): index %" PRIu32
                             " must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
                             index, report_data->FormatHandle(queryPool).c_str());
        }
    }
    return skip;
}
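// The discard rectangle checks below require non-negative rectangle offsets and keep firstDiscardRectangle +
// discardRectangleCount within VkPhysicalDeviceDiscardRectanglePropertiesEXT::maxDiscardRectangles.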
"VUID-vkCmdSetDiscardRectangleEXT-viewportScissor2D-04788", "vkCmdSetDiscardRectangleEXT"); for (uint32_t i = 0; i < discardRectangleCount; ++i) { if (pDiscardRectangles[i].offset.x < 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetDiscardRectangleEXT-x-00587", "vkCmdSetDiscardRectangleEXT(): pDiscardRectangles[%" PRIu32 "].x (%" PRIi32 ") is negative.", i, pDiscardRectangles[i].offset.x); } if (pDiscardRectangles[i].offset.y < 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetDiscardRectangleEXT-x-00587", "vkCmdSetDiscardRectangleEXT(): pDiscardRectangles[%" PRIu32 "].y (%" PRIi32 ") is negative.", i, pDiscardRectangles[i].offset.y); } } if (firstDiscardRectangle + discardRectangleCount > phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetDiscardRectangleEXT-firstDiscardRectangle-00585", "vkCmdSetDiscardRectangleEXT(): firstDiscardRectangle (%" PRIu32 ") + discardRectangleCount (%" PRIu32 ") is not less than VkPhysicalDeviceDiscardRectanglePropertiesEXT::maxDiscardRectangles (%" PRIu32 ".", firstDiscardRectangle, discardRectangleCount, phys_dev_ext_props.discard_rectangle_props.maxDiscardRectangles); } return skip; } bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); // Minimal validation for command buffer state skip |= ValidateCmd(cb_state.get(), CMD_SETSAMPLELOCATIONSEXT); skip |= ValidateSampleLocationsInfo(pSampleLocationsInfo, "vkCmdSetSampleLocationsEXT"); const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS); const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state; if (pipe != nullptr) { // Check same error with different log messages const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pipe->create_info.graphics.pMultisampleState; if (multisample_state == nullptr) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529", "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel must be equal to " "rasterizationSamples, but the bound graphics pipeline was created without a multisample state"); } else if (multisample_state->rasterizationSamples != pSampleLocationsInfo->sampleLocationsPerPixel) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529", "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel (%s) is not equal to " "the last bound pipeline's rasterizationSamples (%s)", string_VkSampleCountFlagBits(pSampleLocationsInfo->sampleLocationsPerPixel), string_VkSampleCountFlagBits(multisample_state->rasterizationSamples)); } } return skip; } bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name, const VkSamplerYcbcrConversionCreateInfo *create_info) const { bool skip = false; const VkFormat conversion_format = create_info->format; // Need to check for external format conversion first as it allows for non-UNORM format bool external_format = false; #ifdef VK_USE_PLATFORM_ANDROID_KHR const VkExternalFormatANDROID *ext_format_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext); if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) { external_format = true; if (VK_FORMAT_UNDEFINED != create_info->format) { return LogError(device, 
"VUID-VkSamplerYcbcrConversionCreateInfo-format-01904", "%s: CreateInfo format is not VK_FORMAT_UNDEFINED while " "there is a chained VkExternalFormatANDROID struct with a non-zero externalFormat.", func_name); } } #endif // VK_USE_PLATFORM_ANDROID_KHR if ((external_format == false) && (FormatIsUNORM(conversion_format) == false)) { const char *vuid = IsExtEnabled(device_extensions.vk_android_external_memory_android_hardware_buffer) ? "VUID-VkSamplerYcbcrConversionCreateInfo-format-04061" : "VUID-VkSamplerYcbcrConversionCreateInfo-format-04060"; skip |= LogError(device, vuid, "%s: CreateInfo format (%s) is not an UNORM format and there is no external format conversion being created.", func_name, string_VkFormat(conversion_format)); } // Gets VkFormatFeatureFlags according to Sampler Ycbcr Conversion Format Features // (vkspec.html#potential-format-features) VkFormatFeatureFlags2KHR format_features = ~0ULL; if (conversion_format == VK_FORMAT_UNDEFINED) { #ifdef VK_USE_PLATFORM_ANDROID_KHR // only check for external format inside VK_FORMAT_UNDEFINED check to prevent unnecessary extra errors from no format // features being supported if (external_format == true) { auto it = ahb_ext_formats_map.find(ext_format_android->externalFormat); if (it != ahb_ext_formats_map.end()) { format_features = it->second; } } #endif // VK_USE_PLATFORM_ANDROID_KHR } else { format_features = GetPotentialFormatFeatures(conversion_format); } // Check all VUID that are based off of VkFormatFeatureFlags // These can't be in StatelessValidation due to needing possible External AHB state for feature support if (((format_features & VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR) == 0) && ((format_features & VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR) == 0)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01650", "%s: Format %s does not support either VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or " "VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT", func_name, string_VkFormat(conversion_format)); } if ((format_features & VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR) == 0) { if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651", "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so xChromaOffset can't " "be VK_CHROMA_LOCATION_COSITED_EVEN", func_name, string_VkFormat(conversion_format)); } if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651", "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so yChromaOffset can't " "be VK_CHROMA_LOCATION_COSITED_EVEN", func_name, string_VkFormat(conversion_format)); } } if ((format_features & VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR) == 0) { if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652", "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so xChromaOffset can't " "be VK_CHROMA_LOCATION_MIDPOINT", func_name, string_VkFormat(conversion_format)); } if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) { skip |= LogError(device, 
"VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652", "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so yChromaOffset can't " "be VK_CHROMA_LOCATION_MIDPOINT", func_name, string_VkFormat(conversion_format)); } } if (((format_features & VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR) == 0) && (create_info->forceExplicitReconstruction == VK_TRUE)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-forceExplicitReconstruction-01656", "%s: Format %s does not support " "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT so " "forceExplicitReconstruction must be VK_FALSE", func_name, string_VkFormat(conversion_format)); } if (((format_features & VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR) == 0) && (create_info->chromaFilter == VK_FILTER_LINEAR)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-chromaFilter-01657", "%s: Format %s does not support VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT so " "chromaFilter must not be VK_FILTER_LINEAR", func_name, string_VkFormat(conversion_format)); } return skip; } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const { bool skip = false; auto num_samplers = Count<SAMPLER_STATE>(); if (num_samplers >= phys_dev_props.limits.maxSamplerAllocationCount) { skip |= LogError( device, "VUID-vkCreateSampler-maxSamplerAllocationCount-04110", "vkCreateSampler(): Number of currently valid sampler objects (%zu) is not less than the maximum allowed (%u).", num_samplers, phys_dev_props.limits.maxSamplerAllocationCount); } if (enabled_features.core11.samplerYcbcrConversion == VK_TRUE) { const VkSamplerYcbcrConversionInfo *conversion_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext); if (conversion_info != nullptr) { const VkSamplerYcbcrConversion sampler_ycbcr_conversion = conversion_info->conversion; const auto ycbcr_state = Get<SAMPLER_YCBCR_CONVERSION_STATE>(sampler_ycbcr_conversion); if ((ycbcr_state->format_features & VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR) == 0) { const VkFilter chroma_filter = ycbcr_state->chromaFilter; if (pCreateInfo->minFilter != chroma_filter) { skip |= LogError( device, "VUID-VkSamplerCreateInfo-minFilter-01645", "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is " "not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to " "chromaFilter (%s)", report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format), string_VkFilter(pCreateInfo->minFilter), 
                    string_VkFilter(chroma_filter));
            }
            if (pCreateInfo->magFilter != chroma_filter) {
                skip |= LogError(
                    device, "VUID-VkSamplerCreateInfo-minFilter-01645",
                    "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
                    "not supported for SamplerYcbcrConversion's (%s) format %s so magFilter (%s) needs to be equal to "
                    "chromaFilter (%s)",
                    report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
                    string_VkFilter(pCreateInfo->magFilter), string_VkFilter(chroma_filter));
            }
        }
        // At this point there is a known sampler YCbCr conversion enabled
        const auto *sampler_reduction = LvlFindInChain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext);
        if (sampler_reduction != nullptr) {
            if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-01647",
                                 "A sampler YCbCr Conversion is being used creating this sampler so the sampler reduction mode "
                                 "must be VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.");
            }
        }
    }
    }

    if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT ||
        pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) {
        if (!enabled_features.custom_border_color_features.customBorderColors) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-customBorderColors-04085",
                             "vkCreateSampler(): A custom border color was specified without enabling the custom border color feature");
        }
        auto custom_create_info = LvlFindInChain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext);
        if (custom_create_info) {
            if (custom_create_info->format == VK_FORMAT_UNDEFINED &&
                !enabled_features.custom_border_color_features.customBorderColorWithoutFormat) {
                skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04014",
                                 "vkCreateSampler(): A custom border color was specified as VK_FORMAT_UNDEFINED without the "
                                 "customBorderColorWithoutFormat feature being enabled");
            }
        }
        if (custom_border_color_sampler_count >= phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-04012",
                             "vkCreateSampler(): Creating a sampler with a custom border color will exceed the "
                             "maxCustomBorderColorSamplers limit of %d",
                             phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers);
        }
    }

    if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) {
        if ((VK_FALSE == enabled_features.portability_subset_features.samplerMipLodBias) && pCreateInfo->mipLodBias != 0) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-samplerMipLodBias-04467",
                             "vkCreateSampler (portability error): mip LOD bias not supported.");
        }
    }

    // If any of addressModeU, addressModeV or addressModeW are VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, the
    // VK_KHR_sampler_mirror_clamp_to_edge extension or promoted feature must be enabled
    if ((device_extensions.vk_khr_sampler_mirror_clamp_to_edge != kEnabledByCreateinfo) &&
        (enabled_features.core12.samplerMirrorClampToEdge == VK_FALSE)) {
        // Use 'else' because getting 3 large error messages is redundant and assume developer, if set all 3, will notice and fix
        // all at once
        if (pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079",
                             "vkCreateSampler(): addressModeU is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the "
                             "VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled.");
        } else if 
(pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079", "vkCreateSampler(): addressModeV is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the " "VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled."); } else if (pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079", "vkCreateSampler(): addressModeW is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the " "VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled."); } } return skip; } bool CoreChecks::ValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress && !enabled_features.buffer_device_address_ext_features.bufferDeviceAddress) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324", "%s: The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice && !enabled_features.buffer_device_address_ext_features.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-device-03325", "%s: If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } const auto buffer_state = Get<BUFFER_STATE>(pInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), apiName, "VUID-VkBufferDeviceAddressInfo-buffer-02600"); } skip |= ValidateBufferUsageFlags(buffer_state.get(), VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, true, "VUID-VkBufferDeviceAddressInfo-buffer-02601", apiName, "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT"); } return skip; } bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddressEXT"); } bool CoreChecks::PreCallValidateGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddressKHR"); } bool CoreChecks::PreCallValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddress"); } bool CoreChecks::ValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-None-03326", "%s(): The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-device-03327", "%s(): If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } return 
skip; } bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferOpaqueCaptureAddressKHR"); } bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferOpaqueCaptureAddress"); } bool CoreChecks::ValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress) { skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-None-03334", "%s(): The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-device-03335", "%s(): If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } const auto mem_info = Get<DEVICE_MEMORY_STATE>(pInfo->memory); if (mem_info) { auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext); if (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT)) { skip |= LogError(pInfo->memory, "VUID-VkDeviceMemoryOpaqueCaptureAddressInfo-memory-03336", "%s(): memory must have been allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.", apiName); } } return skip; } bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const { return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo), "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); } bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const { return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo), "vkGetDeviceMemoryOpaqueCaptureAddress"); } bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery, uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange, const char *apiName) const { bool skip = false; if (firstQuery >= totalCount) { skip |= LogError(device, vuid_badfirst, "%s(): firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", apiName, firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str()); } if ((firstQuery + queryCount) > totalCount) { skip |= LogError(device, vuid_badrange, "%s(): Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", apiName, firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str()); } return skip; } bool CoreChecks::ValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *apiName) const { if (disabled[query_validation]) return false; bool skip = false; if (!enabled_features.core12.hostQueryReset) { skip |= LogError(device, "VUID-vkResetQueryPool-None-02665", "%s(): Host query reset not 
enabled for device", apiName); } const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); if (query_pool_state) { skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount, "VUID-vkResetQueryPool-firstQuery-02666", "VUID-vkResetQueryPool-firstQuery-02667", apiName); } return skip; } bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const { return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPoolEXT"); } bool CoreChecks::PreCallValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const { return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPool"); } VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkValidationCacheEXT *pValidationCache) { *pValidationCache = ValidationCache::Create(pCreateInfo); return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; } void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks *pAllocator) { delete CastFromHandle<ValidationCache *>(validationCache); } VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize, void *pData) { size_t in_size = *pDataSize; CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData); return (pData && *pDataSize != in_size) ? VK_INCOMPLETE : VK_SUCCESS; } VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT *pSrcCaches) { bool skip = false; auto dst = CastFromHandle<ValidationCache *>(dstCache); VkResult result = VK_SUCCESS; for (uint32_t i = 0; i < srcCacheCount; i++) { auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]); if (src == dst) { skip |= LogError(device, "VUID-vkMergeValidationCachesEXT-dstCache-01536", "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.", HandleToUint64(dstCache)); result = VK_ERROR_VALIDATION_FAILED_EXT; } if (!skip) { dst->Merge(src); } } return result; } bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, CMD_TYPE cmd_type) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); skip |= ValidateCmd(cb_state.get(), cmd_type); skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00108"); skip |= ValidateDeviceMaskToZero(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00109"); skip |= ValidateDeviceMaskToCommandBuffer(cb_state.get(), deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00110"); if (cb_state->activeRenderPass) { skip |= ValidateDeviceMaskToRenderPass(cb_state.get(), deviceMask, "VUID-vkCmdSetDeviceMask-deviceMask-00111"); } return skip; } bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const { return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, CMD_SETDEVICEMASK); } bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const { return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, CMD_SETDEVICEMASKKHR); } bool 
CoreChecks::ValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue, const char *apiName) const { bool skip = false; const auto semaphore_state = Get<SEMAPHORE_STATE>(semaphore); if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) { skip |= LogError(semaphore, "VUID-vkGetSemaphoreCounterValue-semaphore-03255", "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", apiName, report_data->FormatHandle(semaphore).c_str()); } return skip; } bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const { return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValueKHR"); } bool CoreChecks::PreCallValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const { return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValue"); } bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride, const char *parameter_name, const uint64_t parameter_value, const VkQueryResultFlags flags) const { bool skip = false; if (flags & VK_QUERY_RESULT_64_BIT) { static const int condition_multiples = 0b0111; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= LogError(device, vuid_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } else { static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= LogError(device, vuid_not_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size) const { bool skip = false; static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (stride < struct_size)) { skip |= LogError(commandBuffer, vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size); } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size, const uint32_t drawCount, const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const { bool skip = false; uint64_t validation_value = stride * (drawCount - 1) + offset + struct_size; if (validation_value > buffer_state->createInfo.size) { skip |= LogError(commandBuffer, vuid, "stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64 " is greater than the size[%" PRIx64 "] of %s.", stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size, report_data->FormatHandle(buffer_state->buffer()).c_str()); } return skip; } bool CoreChecks::PreCallValidateReleaseProfilingLockKHR(VkDevice device) const { bool skip = false; if (!performance_lock_acquired) { skip |= LogError(device, "VUID-vkReleaseProfilingLockKHR-device-03235", "vkReleaseProfilingLockKHR(): The profiling lock of device must have been held via a previous successful " "call to vkAcquireProfilingLockKHR."); } return skip; } bool CoreChecks::PreCallValidateCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void *pCheckpointMarker) const { { const auto cb_state = 
Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETCHECKPOINTNV); return skip; } }
bool CoreChecks::PreCallValidateWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, size_t dataSize, void *pData, size_t stride) const { bool skip = false; for (uint32_t i = 0; i < accelerationStructureCount; ++i) { const auto as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pAccelerationStructures[i]); const auto &as_info = as_state->build_info_khr; if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) { if (!(as_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431", "vkWriteAccelerationStructuresPropertiesKHR: All acceleration structures (%s) in " "pAccelerationStructures must have been built with " "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.", report_data->FormatHandle(as_state->acceleration_structure()).c_str()); } } } return skip; }
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); skip |= ValidateCmd(cb_state.get(), CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESKHR); const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType != queryType) { skip |= LogError( device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryPool-02493", "vkCmdWriteAccelerationStructuresPropertiesKHR: queryPool must have been created with a queryType matching queryType."); } for (uint32_t i = 0; i < accelerationStructureCount; ++i) { if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) { const auto as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pAccelerationStructures[i]); if (!(as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError( device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431", "vkCmdWriteAccelerationStructuresPropertiesKHR: All acceleration structures in pAccelerationStructures " "must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR."); } } } return skip; }
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); skip |= ValidateCmd(cb_state.get(), CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESNV); const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType != queryType) { skip |= LogError( device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-queryPool-03755", "vkCmdWriteAccelerationStructuresPropertiesNV: queryPool must have been created with a queryType matching queryType."); } for (uint32_t i = 0; i < accelerationStructureCount; ++i) { if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) { const auto as_state = Get<ACCELERATION_STRUCTURE_STATE>(pAccelerationStructures[i]); if (!(as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-pAccelerationStructures-06215", "vkCmdWriteAccelerationStructuresPropertiesNV: All acceleration structures in pAccelerationStructures " "must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV."); } } } return skip; }
uint32_t CoreChecks::CalcTotalShaderGroupCount(const PIPELINE_STATE *pipelineState) const { const auto &create_info = pipelineState->create_info.raytracing; uint32_t total = create_info.groupCount; if (create_info.pLibraryInfo) { for (uint32_t i = 0; i < create_info.pLibraryInfo->libraryCount; ++i) { const auto library_pipeline_state = Get<PIPELINE_STATE>(create_info.pLibraryInfo->pLibraries[i]); total += CalcTotalShaderGroupCount(library_pipeline_state.get()); } } return total; }
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void *pData) const { bool skip = false; const auto pipeline_state = Get<PIPELINE_STATE>(pipeline); if (pipeline_state->GetPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) { skip |= LogError( device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-pipeline-03482", "vkGetRayTracingShaderGroupHandlesKHR: pipeline must not have been created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR."); } if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleSize * groupCount)) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-dataSize-02420", "vkGetRayTracingShaderGroupHandlesKHR: dataSize (%zu) must be at least " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleSize * groupCount.", dataSize); } uint32_t total_group_count = CalcTotalShaderGroupCount(pipeline_state.get()); if (firstGroup >= total_group_count) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-04050", "vkGetRayTracingShaderGroupHandlesKHR: firstGroup must be less than the number of shader groups in pipeline."); } if ((firstGroup + groupCount) > total_group_count) { skip |= LogError( device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-02419", "vkGetRayTracingShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less than or equal to the number " "of shader groups in pipeline."); } return skip; }
bool CoreChecks::PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void *pData) const { bool skip = false; if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleCaptureReplaySize * groupCount)) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484", "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize (%zu) must be at least " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleCaptureReplaySize * groupCount.", dataSize); } const auto pipeline_state = Get<PIPELINE_STATE>(pipeline); if (!pipeline_state) { return skip; } const auto &create_info = pipeline_state->create_info.raytracing; if (firstGroup >= create_info.groupCount) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051", "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: firstGroup must be less than the number of shader " "groups in pipeline."); } if ((firstGroup + groupCount) > create_info.groupCount) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483", "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less " "than or equal to the number of shader groups in pipeline."); } if (!(create_info.flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-pipeline-03607", "pipeline must have been created with flags that included " "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR."); } return skip; }
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkDeviceAddress *pIndirectDeviceAddresses, const uint32_t *pIndirectStrides, const uint32_t *const *ppMaxPrimitiveCounts) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR); skip |= ValidateCmdRayQueryState(cb_state.get(), CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR); for (uint32_t i = 0; i < infoCount; ++i) { const auto src_as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfos[i].srcAccelerationStructure); const auto dst_as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfos[i].dstAccelerationStructure); if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) { if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03667", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have " "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in " "VkAccelerationStructureBuildGeometryInfoKHR::flags."); } if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03758", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR," " its geometryCount member must have the same value which was specified when " "srcAccelerationStructure was last built."); } if (pInfos[i].flags != src_as_state->build_info_khr.flags) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03759", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which" " was specified when srcAccelerationStructure was last built."); } if (pInfos[i].type != src_as_state->build_info_khr.type) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03760", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which" " was specified when srcAccelerationStructure was last built."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03700", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have " "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) { if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03699", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } } return skip; }
bool CoreChecks::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo, const char *api_name) const { bool skip = false; if (pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR) { const auto src_as_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfo->src); if (!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-src-03411", "(%s): src must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR " "if mode is VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR.", api_name); } } const auto src_accel_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfo->src); if (src_accel_state) { const auto buffer_state = Get<BUFFER_STATE>(src_accel_state->create_infoKHR.buffer); skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), api_name, "VUID-VkCopyAccelerationStructureInfoKHR-buffer-03718"); } const auto dst_accel_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfo->dst); if (dst_accel_state) { const auto buffer_state = Get<BUFFER_STATE>(dst_accel_state->create_infoKHR.buffer); skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), api_name, "VUID-VkCopyAccelerationStructureInfoKHR-buffer-03719"); } return skip; }
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR *pInfo) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); skip |= ValidateCmd(cb_state.get(), CMD_COPYACCELERATIONSTRUCTUREKHR); skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR"); return skip; }
CoreChecks::PreCallValidateCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR *pInfo) const { bool skip = false; skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR"); return skip; } bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureToMemoryKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_COPYACCELERATIONSTRUCTURETOMEMORYKHR); const auto accel_state = Get<ACCELERATION_STRUCTURE_STATE_KHR>(pInfo->src); if (accel_state) { const auto buffer_state = Get<BUFFER_STATE>(accel_state->create_infoKHR.buffer); skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), "vkCmdCopyAccelerationStructureToMemoryKHR", "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-None-03559"); } return skip; } bool CoreChecks::PreCallValidateCmdCopyMemoryToAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_COPYMEMORYTOACCELERATIONSTRUCTUREKHR); return skip; } bool CoreChecks::PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes) const { bool skip = false; char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT"; if (!enabled_features.transform_feedback_features.transformFeedback) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-transformFeedback-02355", "%s: transformFeedback feature is not enabled.", cmd_name); } { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (cb_state->transform_feedback_active) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-None-02365", "%s: transform feedback is active.", cmd_name); } } for (uint32_t i = 0; i < bindingCount; ++i) { const auto buffer_state = Get<BUFFER_STATE>(pBuffers[i]); assert(buffer_state != nullptr); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02358", "%s: pOffset[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than or equal to the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").", cmd_name, i, pOffsets[i], i, buffer_state->createInfo.size); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT) == 0) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02360", "%s: pBuffers[%" PRIu32 "] (%s)" " was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT flag.", cmd_name, i, report_data->FormatHandle(pBuffers[i]).c_str()); } // pSizes is optional and may be nullptr. 
// Also might contain VK_WHOLE_SIZE, in which case these VUs do not apply
if ((pSizes != nullptr) && (pSizes[i] != VK_WHOLE_SIZE)) {
// Only report one error: if pSizes[i] alone is larger than the buffer, pOffsets[i] + pSizes[i] will be as well
if (pSizes[i] > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSizes-02362", "%s: pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").", cmd_name, i, pSizes[i], i, buffer_state->createInfo.size); } else if (pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02363", "%s: The sum of pOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ") and pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").", cmd_name, i, pOffsets[i], i, pSizes[i], i, buffer_state->createInfo.size); } } skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), cmd_name, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02364"); } return skip; }
bool CoreChecks::PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer *pCounterBuffers, const VkDeviceSize *pCounterBufferOffsets) const { bool skip = false; char const *const cmd_name = "CmdBeginTransformFeedbackEXT"; if (!enabled_features.transform_feedback_features.transformFeedback) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-transformFeedback-02366", "%s: transformFeedback feature is not enabled.", cmd_name); } { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (cb_state) { if (cb_state->transform_feedback_active) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02367", "%s: transform feedback is active.", cmd_name); } if (cb_state->activeRenderPass) { const auto &rp_ci = cb_state->activeRenderPass->createInfo; for (uint32_t i = 0; i < rp_ci.subpassCount; ++i) {
// When a subpass uses a non-zero view mask, multiview functionality is considered to be enabled
if (rp_ci.pSubpasses[i].viewMask > 0) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02373", "%s: active render pass (%s) has multiview enabled.", cmd_name, report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); break; } } } } }
// pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) { if (pCounterBufferOffsets != nullptr) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffer-02371", "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name); } } else { for (uint32_t i = 0; i < counterBufferCount; ++i) { if (pCounterBuffers[i] != VK_NULL_HANDLE) { const auto buffer_state = Get<BUFFER_STATE>(pCounterBuffers[i]); assert(buffer_state != nullptr); if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBufferOffsets-02370", "%s: pCounterBuffers[%" PRIu32 "](%s) is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIx64 ").", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str(), i, pCounterBufferOffsets[i]); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffers-02372", "%s: pCounterBuffers[%" PRIu32 "] (%s) was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str()); } } } } return skip; }
bool CoreChecks::PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer *pCounterBuffers, const VkDeviceSize *pCounterBufferOffsets) const { bool skip = false; char const *const cmd_name = "CmdEndTransformFeedbackEXT"; if (!enabled_features.transform_feedback_features.transformFeedback) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-transformFeedback-02374", "%s: transformFeedback feature is not enabled.", cmd_name); } { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!cb_state->transform_feedback_active) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-None-02375", "%s: transform feedback is not active.", cmd_name); } }
// pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) { if (pCounterBufferOffsets != nullptr) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffer-02379", "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name); } } else { for (uint32_t i = 0; i < counterBufferCount; ++i) { if (pCounterBuffers[i] != VK_NULL_HANDLE) { const auto buffer_state = Get<BUFFER_STATE>(pCounterBuffers[i]); assert(buffer_state != nullptr); if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdEndTransformFeedbackEXT-pCounterBufferOffsets-02378", "%s: pCounterBuffers[%" PRIu32 "](%s) is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIx64 ").", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str(), i, pCounterBufferOffsets[i]); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffers-02380", "%s: pCounterBuffers[%" PRIu32 "] (%s) was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str()); } } } } return skip; }
bool CoreChecks::PreCallValidateCmdSetLogicOpEXT(VkCommandBuffer commandBuffer, VkLogicOp logicOp) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETLOGICOPEXT); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp) { skip |= LogError(commandBuffer, "VUID-vkCmdSetLogicOpEXT-None-04867", "vkCmdSetLogicOpEXT: extendedDynamicState2LogicOp feature is not enabled."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer, uint32_t patchControlPoints) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETPATCHCONTROLPOINTSEXT); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-None-04873", "vkCmdSetPatchControlPointsEXT: extendedDynamicState2PatchControlPoints feature is not enabled."); } if (patchControlPoints > phys_dev_props.limits.maxTessellationPatchSize) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-patchControlPoints-04874", "vkCmdSetPatchControlPointsEXT: The value of patchControlPoints must be less than or equal to " "VkPhysicalDeviceLimits::maxTessellationPatchSize."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetRasterizerDiscardEnableEXT(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETRASTERIZERDISCARDENABLEEXT); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) { skip |= LogError(commandBuffer, "VUID-vkCmdSetRasterizerDiscardEnableEXT-None-04871", "vkCmdSetRasterizerDiscardEnableEXT: extendedDynamicState2 feature is not enabled."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetDepthBiasEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETDEPTHBIASENABLEEXT); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBiasEnableEXT-None-04872", "vkCmdSetDepthBiasEnableEXT: extendedDynamicState2 feature is not enabled."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetPrimitiveRestartEnableEXT(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETPRIMITIVERESTARTENABLEEXT); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveRestartEnableEXT-None-04866", "vkCmdSetPrimitiveRestartEnableEXT: extendedDynamicState2 feature is not enabled."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETCULLMODEEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetCullModeEXT-None-03384", "vkCmdSetCullModeEXT: extendedDynamicState feature is not enabled."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETFRONTFACEEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetFrontFaceEXT-None-03383", "vkCmdSetFrontFaceEXT: extendedDynamicState feature is not enabled."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETPRIMITIVETOPOLOGYEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347", "vkCmdSetPrimitiveTopologyEXT: extendedDynamicState feature is not enabled."); } return skip; }
bool CoreChecks::PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport *pViewports) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETVIEWPORTWITHCOUNTEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-None-03393", "vkCmdSetViewportWithCountEXT: extendedDynamicState feature is not enabled."); } skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state.get(), "VUID-vkCmdSetViewportWithCountEXT-commandBuffer-04819", "vkCmdSetViewportWithCountEXT"); return skip; }
bool CoreChecks::PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D *pScissors) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETSCISSORWITHCOUNTEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-None-03396", "vkCmdSetScissorWithCountEXT: extendedDynamicState feature is not enabled."); }
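// Like vkCmdSetViewportWithCountEXT above, this command is disallowed in command buffers that inherit
// viewport/scissor state via VK_NV_inherited_viewport_scissor; ForbidInheritedViewportScissor() covers
// VUID-vkCmdSetScissorWithCountEXT-commandBuffer-04820.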
skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state.get(), "VUID-vkCmdSetScissorWithCountEXT-commandBuffer-04820", "vkCmdSetScissorWithCountEXT"); return skip; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes, const VkDeviceSize *pStrides) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_BINDVERTEXBUFFERS2EXT); for (uint32_t i = 0; i < bindingCount; ++i) { const auto buffer_state = Get<BUFFER_STATE>(pBuffers[i]); if (buffer_state) { skip |= ValidateBufferUsageFlags(buffer_state.get(), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359", "vkCmdBindVertexBuffers2EXT()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state.get(), "vkCmdBindVertexBuffers2EXT()", "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03360"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357", "vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } if (pSizes && pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358", "vkCmdBindVertexBuffers2EXT() size (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pSizes[i]); } } } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETDEPTHTESTENABLEEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthTestEnableEXT-None-03352", "vkCmdSetDepthTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETDEPTHWRITEENABLEEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354", "vkCmdSetDepthWriteEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETDEPTHCOMPAREOPEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthCompareOpEXT-None-03353", "vkCmdSetDepthCompareOpEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETDEPTHBOUNDSTESTENABLEEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, 
"VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349", "vkCmdSetDepthBoundsTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETSTENCILTESTENABLEEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilTestEnableEXT-None-03350", "vkCmdSetStencilTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETSTENCILOPEXT); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilOpEXT-None-03351", "vkCmdSetStencilOpEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) const { bool skip = false; if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) { if (VK_FALSE == enabled_features.portability_subset_features.events) { skip |= LogError(device, "VUID-vkCreateEvent-events-04468", "vkCreateEvent: events are not supported via VK_KHR_portability_subset"); } } return skip; } bool CoreChecks::PreCallValidateCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); skip |= ValidateCmd(cb_state.get(), CMD_SETRAYTRACINGPIPELINESTACKSIZEKHR); return skip; } bool CoreChecks::PreCallValidateGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader) const { bool skip = false; const auto pipeline_state = Get<PIPELINE_STATE>(pipeline); if (pipeline_state) { if (pipeline_state->GetPipelineType() != VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-pipeline-04622", "vkGetRayTracingShaderGroupStackSizeKHR: Pipeline must be a ray-tracing pipeline, but is a %s pipeline.", GetPipelineTypeName(pipeline_state->GetPipelineType())); } else if (group >= pipeline_state->create_info.raytracing.groupCount) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-group-03608", "vkGetRayTracingShaderGroupStackSizeKHR: The value of group must be less than the number of shader groups " "in pipeline."); } } return skip; } bool CoreChecks::PreCallValidateCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D *pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const { const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); assert(cb_state); const char *cmd_name = "vkCmdSetFragmentShadingRateKHR()"; bool skip = false; skip |= ValidateCmd(cb_state.get(), CMD_SETFRAGMENTSHADINGRATEKHR); if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && !enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && 
!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { skip |= LogError( cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04509", "vkCmdSetFragmentShadingRateKHR: Application called %s, but no fragment shading rate features have been enabled.", cmd_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->width != 1) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04507", "vkCmdSetFragmentShadingRateKHR: Pipeline fragment width of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pFragmentSize->width, cmd_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->height != 1) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04508", "vkCmdSetFragmentShadingRateKHR: Pipeline fragment height of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pFragmentSize->height, cmd_name); } if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-primitiveFragmentShadingRate-04510", "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but " "primitiveFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name); } if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-attachmentFragmentShadingRate-04511", "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but " "attachmentFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512", "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is " "not supported", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512", "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps " "is not supported", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name); } if (pFragmentSize->width == 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04513", "vkCmdSetFragmentShadingRateKHR: Fragment width of %u has been specified in %s.", pFragmentSize->width, cmd_name); } if 
(pFragmentSize->height == 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04514", "vkCmdSetFragmentShadingRateKHR: Fragment height of %u has been specified in %s.", pFragmentSize->height, cmd_name); } if (pFragmentSize->width != 0 && !IsPowerOfTwo(pFragmentSize->width)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04515", "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment width of %u has been specified in %s.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height != 0 && !IsPowerOfTwo(pFragmentSize->height)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04516", "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment height of %u has been specified in %s.", pFragmentSize->height, cmd_name); } if (pFragmentSize->width > 4) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04517", "vkCmdSetFragmentShadingRateKHR: Fragment width of %u specified in %s is too large.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height > 4) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04518", "vkCmdSetFragmentShadingRateKHR: Fragment height of %u specified in %s is too large", pFragmentSize->height, cmd_name); } return skip; } bool CoreChecks::PreCallValidateCmdSetColorWriteEnableEXT(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkBool32 *pColorWriteEnables) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (!enabled_features.color_write_features.colorWriteEnable) { skip |= LogError(commandBuffer, "VUID-vkCmdSetColorWriteEnableEXT-None-04803", "vkCmdSetColorWriteEnableEXT: color write is not enabled."); } auto graphics_pipeline = cb_state->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS); if (graphics_pipeline) { uint32_t pipeline_attachment_count = graphics_pipeline->create_info.graphics.pColorBlendState->attachmentCount; if (attachmentCount != pipeline_attachment_count) { skip |= LogError( commandBuffer, "VUID-vkCmdSetColorWriteEnableEXT-attachmentCount-04804", "vkCmdSetColorWriteEnableEXT: attachment count (%" PRIu32 ") is not equal to currenly bound pipelines VkPipelineColorBlendStateCreateInfo::attachmentCount (%" PRIu32 ").", attachmentCount, pipeline_attachment_count); } } return skip; } bool CoreChecks::PreCallValidateCmdBeginConditionalRenderingEXT( VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (cb_state && cb_state->conditional_rendering_active) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginConditionalRenderingEXT-None-01980", "vkCmdBeginConditionalRenderingEXT(): Conditional rendering is already active."); } if (pConditionalRenderingBegin) { const auto buffer_state = Get<BUFFER_STATE>(pConditionalRenderingBegin->buffer); if (buffer_state) { if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT) == 0) { skip |= LogError(commandBuffer, "VUID-VkConditionalRenderingBeginInfoEXT-buffer-01982", "vkCmdBeginConditionalRenderingEXT(): pConditionalRenderingBegin->buffer (%s) was not create with " "VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT bit.", report_data->FormatHandle(pConditionalRenderingBegin->buffer).c_str()); } if (pConditionalRenderingBegin->offset + 4 > buffer_state->createInfo.size) { 
skip |= LogError(commandBuffer, "VUID-VkConditionalRenderingBeginInfoEXT-offset-01983", "vkCmdBeginConditionalRenderingEXT(): pConditionalRenderingBegin->offset (%" PRIu64 ") + 4 bytes is greater than the size of pConditionalRenderingBegin->buffer (%" PRIu64 ").", pConditionalRenderingBegin->offset, buffer_state->createInfo.size); } } } return skip; } bool CoreChecks::PreCallValidateCmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer) const { bool skip = false; const auto cb_state = Get<CMD_BUFFER_STATE>(commandBuffer); if (cb_state) { if (!cb_state->conditional_rendering_active) { skip |= LogError(commandBuffer, "VUID-vkCmdEndConditionalRenderingEXT-None-01985", "vkCmdEndConditionalRenderingEXT(): Conditional rendering is not active."); } if (!cb_state->conditional_rendering_inside_render_pass && cb_state->activeRenderPass != nullptr) { skip |= LogError(commandBuffer, "VUID-vkCmdEndConditionalRenderingEXT-None-01986", "vkCmdEndConditionalRenderingEXT(): Conditional rendering was begun outside of a render " "pass instance, but a render pass instance is currently active in the command buffer."); } if (cb_state->conditional_rendering_inside_render_pass && cb_state->activeRenderPass != nullptr && cb_state->conditional_rendering_subpass != cb_state->activeSubpass) { skip |= LogError(commandBuffer, "VUID-vkCmdEndConditionalRenderingEXT-None-01987", "vkCmdEndConditionalRenderingEXT(): Conditional rendering was begun in subpass %" PRIu32 ", but the current subpass is %" PRIu32 ".", cb_state->conditional_rendering_subpass, cb_state->activeSubpass); } } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateAcquireFullScreenExclusiveModeEXT(VkDevice device, VkSwapchainKHR swapchain) const { bool skip = false; const auto swapchain_state = Get<SWAPCHAIN_NODE>(swapchain); if (swapchain_state) { if (swapchain_state->retired) { skip |= LogError(device, "VUID-vkAcquireFullScreenExclusiveModeEXT-swapchain-02674", "vkAcquireFullScreenExclusiveModeEXT(): swapchain %s is retired.", report_data->FormatHandle(swapchain).c_str()); } const auto *surface_full_screen_exclusive_info = LvlFindInChain<VkSurfaceFullScreenExclusiveInfoEXT>(swapchain_state->createInfo.pNext); if (!surface_full_screen_exclusive_info || surface_full_screen_exclusive_info->fullScreenExclusive != VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT) { skip |= LogError(device, "VUID-vkAcquireFullScreenExclusiveModeEXT-swapchain-02675", "vkAcquireFullScreenExclusiveModeEXT(): swapchain %s was not created with VkSurfaceFullScreenExclusiveInfoEXT in " "the pNext chain with fullScreenExclusive equal to VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT.", report_data->FormatHandle(swapchain).c_str()); } } return skip; } #endif bool CoreChecks::ValidatePhysicalDeviceSurfaceSupport(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, const char *vuid, const char *func_name) const { bool skip = false; const auto pd_state = Get<PHYSICAL_DEVICE_STATE>(physicalDevice); const auto surface_state = Get<SURFACE_STATE>(surface); if (pd_state && surface_state) { bool is_supported = false; for (uint32_t i = 0; i < pd_state->queue_family_properties.size(); i++) { if (surface_state->GetQueueSupport(physicalDevice, i)) { is_supported = true; break; } } if (!is_supported) { skip |= LogError(physicalDevice, vuid, "%s(): surface is not supported by the physicalDevice.", func_name); } } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateGetDeviceGroupSurfacePresentModes2EXT(VkDevice
device, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR *pModes) const { bool skip = false; if (physical_device_count == 1) { ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); skip |= ValidatePhysicalDeviceSurfaceSupport(device_object->physical_device, pSurfaceInfo->surface, "VUID-vkGetDeviceGroupSurfacePresentModes2EXT-pSurfaceInfo-06213", "vkGetDeviceGroupSurfacePresentModes2EXT"); } else { for (uint32_t i = 0; i < physical_device_count; ++i) { skip |= ValidatePhysicalDeviceSurfaceSupport(device_group_create_info.pPhysicalDevices[i], pSurfaceInfo->surface, "VUID-vkGetDeviceGroupSurfacePresentModes2EXT-pSurfaceInfo-06213", "vkGetDeviceGroupSurfacePresentModes2EXT"); } } return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfacePresentModes2EXT(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, pSurfaceInfo->surface, "VUID-vkGetPhysicalDeviceSurfacePresentModes2EXT-pSurfaceInfo-06210", "vkGetPhysicalDeviceSurfacePresentModes2EXT"); return skip; } #endif bool CoreChecks::PreCallValidateGetDeviceGroupSurfacePresentModesKHR(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR *pModes) const { bool skip = false; if (physical_device_count == 1) { ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); skip |= ValidatePhysicalDeviceSurfaceSupport(device_object->physical_device, surface, "VUID-vkGetDeviceGroupSurfacePresentModesKHR-surface-06212", "vkGetDeviceGroupSurfacePresentModesKHR"); } else { for (uint32_t i = 0; i < physical_device_count; ++i) { skip |= ValidatePhysicalDeviceSurfaceSupport(device_group_create_info.pPhysicalDevices[i], surface, "VUID-vkGetDeviceGroupSurfacePresentModesKHR-surface-06212", "vkGetDeviceGroupSurfacePresentModesKHR"); } } return skip; } bool CoreChecks::PreCallValidateGetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pRectCount, VkRect2D *pRects) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface, "VUID-vkGetPhysicalDevicePresentRectanglesKHR-surface-06211", "vkGetPhysicalDevicePresentRectanglesKHR"); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT *pSurfaceCapabilities) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface, "VUID-vkGetPhysicalDeviceSurfaceCapabilities2EXT-surface-06211", "vkGetPhysicalDeviceSurfaceCapabilities2EXT"); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, VkSurfaceCapabilities2KHR *pSurfaceCapabilities) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, pSurfaceInfo->surface, "VUID-vkGetPhysicalDeviceSurfaceCapabilities2KHR-pSurfaceInfo-06210", "vkGetPhysicalDeviceSurfaceCapabilities2KHR"); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface, 
"VUID-vkGetPhysicalDeviceSurfaceCapabilitiesKHR-surface-06211", "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, pSurfaceInfo->surface, "VUID-vkGetPhysicalDeviceSurfaceFormats2KHR-pSurfaceInfo-06210", "vkGetPhysicalDeviceSurfaceFormats2KHR"); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface, "VUID-vkGetPhysicalDeviceSurfaceFormatsKHR-surface-06211", "vkGetPhysicalDeviceSurfaceFormatsKHR"); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes) const { bool skip = false; skip |= ValidatePhysicalDeviceSurfaceSupport(physicalDevice, surface, "VUID-vkGetPhysicalDeviceSurfacePresentModesKHR-surface-06211", "vkGetPhysicalDeviceSurfacePresentModesKHR"); return skip; } void CoreChecks::PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags, VkResult result) { if (result != VK_SUCCESS) { return; } const auto query_pool_state = Get<QUERY_POOL_STATE>(queryPool); if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) == 0) { for (uint32_t i = firstQuery; i < queryCount; ++i) { query_pool_state->SetQueryState(i, 0, QUERYSTATE_AVAILABLE); } } }
1
23,944
Should this be a constant somewhere?
KhronosGroup-Vulkan-ValidationLayers
cpp
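If the question refers to the hardcoded upper bound of 4 in the fragment width/height checks, a minimal sketch of lifting it into a named constant might look like the following; the name kMaxFragmentSizeDimension is hypothetical, not taken from the layer code:

constexpr uint32_t kMaxFragmentSizeDimension = 4;  // upper bound the checks currently hardcode as 4

if (pFragmentSize->width > kMaxFragmentSizeDimension) {
    skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04517",
                     "vkCmdSetFragmentShadingRateKHR: Fragment width of %u specified in %s is too large.",
                     pFragmentSize->width, cmd_name);
}
if (pFragmentSize->height > kMaxFragmentSizeDimension) {
    skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04518",
                     "vkCmdSetFragmentShadingRateKHR: Fragment height of %u specified in %s is too large.",
                     pFragmentSize->height, cmd_name);
}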
@@ -17,5 +17,9 @@ using System.Runtime.CompilerServices; [assembly: InternalsVisibleTo("OpenTelemetry.Tests" + AssemblyInfo.PublicKey)] +[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.Http" + AssemblyInfo.PublicKey)] +[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.AspNet" + AssemblyInfo.PublicKey)] +[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.AspNetCore" + AssemblyInfo.PublicKey)] +[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.GrpcNetClient" + AssemblyInfo.PublicKey)] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2" + AssemblyInfo.MoqPublicKey)] [assembly: InternalsVisibleTo("Benchmarks" + AssemblyInfo.PublicKey)]
1
// <copyright file="AssemblyInfo.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System.Runtime.CompilerServices; [assembly: InternalsVisibleTo("OpenTelemetry.Tests" + AssemblyInfo.PublicKey)] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2" + AssemblyInfo.MoqPublicKey)] [assembly: InternalsVisibleTo("Benchmarks" + AssemblyInfo.PublicKey)]
1
17,849
nit: sort alphabetically.
open-telemetry-opentelemetry-dotnet
.cs
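A sketch of the sorted ordering the nit asks for, covering just the four attributes added by the patch:

[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.AspNet" + AssemblyInfo.PublicKey)]
[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.AspNetCore" + AssemblyInfo.PublicKey)]
[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.GrpcNetClient" + AssemblyInfo.PublicKey)]
[assembly: InternalsVisibleTo("OpenTelemetry.Instrumentation.Http" + AssemblyInfo.PublicKey)]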
@@ -28,6 +28,10 @@ import org.apache.solr.common.SolrException; * @lucene.internal */ class JsonQueryConverter { + public static final String paramsPrefix = "_tt"; + + public static final Object contextKey = JsonQueryConverter.class.getSimpleName(); + private int numParams = 0; String toLocalParams(Object jsonQueryObject, Map<String, String[]> additionalParams) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.request.json; import java.util.List; import java.util.Map; import org.apache.solr.common.SolrException; /** * Convert json query object to local params. * * @lucene.internal */ class JsonQueryConverter { private int numParams = 0; String toLocalParams(Object jsonQueryObject, Map<String, String[]> additionalParams) { if (jsonQueryObject instanceof String) return jsonQueryObject.toString(); StringBuilder builder = new StringBuilder(); buildLocalParams(builder, jsonQueryObject, true, additionalParams); return builder.toString(); } private String putParam(String val, Map<String, String[]> additionalParams) { String name = "_tt"+(numParams++); additionalParams.put(name, new String[]{val}); return name; } // when isQParser==true, "val" is a query object of the form {query_type:{param1:val1, param2:val2}} // when isQParser==false, "val" is a parameter on an existing qparser (which could be a simple parameter like 42, or a sub-query) private void buildLocalParams(StringBuilder builder, Object val, boolean isQParser, Map<String, String[]> additionalParams) { if (!isQParser && !(val instanceof Map)) { // val is value of a query parser, and it is not a map throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error when parsing json query, expect a json object here, but found : "+val); // NOTE: a top-level query *can* be a String, so we should really allow it here. This currently only works because // we special-case String in toLocalParams() and don't call this method. } // We don't want to introduce unnecessary variable at root level boolean useSubBuilder = builder.length() > 0; if (val instanceof String) { if (!useSubBuilder) { // Top level, so just use the value. NOTE: this case is also short-circuited in toLocalParams() for performance. 
builder.append(val.toString()); } else { // val is a parameter in a qparser, so use param deref and skip escaping: ...=$param1}&param1=<val> builder.append('$').append(putParam(val.toString(), additionalParams)); } return; } if (val instanceof Number) { builder.append(val); return; } if (!(val instanceof Map)) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error when parsing json query, expect a json object here, but found : "+val); } Map<String,Object> map = (Map<String, Object>) val; if (isQParser) { if (map.size() != 1) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error when parsing json query, expect only one query parser here, but found : "+map.keySet()); } String qtype = map.keySet().iterator().next(); String tagName = null; if (qtype.startsWith("#")) { Object taggedQueryObject = map.get(qtype); tagName = qtype.substring(1); if (taggedQueryObject instanceof String) { StringBuilder sb = new StringBuilder(); sb.append("{!tag=").append(tagName).append("}"); sb.append(taggedQueryObject.toString()); buildLocalParams(builder, sb.toString(), true, additionalParams); return; } else if (taggedQueryObject instanceof Map) { map = (Map<String, Object>) taggedQueryObject; qtype = map.keySet().iterator().next(); // FUTURE: might want to recurse here instead to handle nested tags (and add tagName as a parameter?) } } StringBuilder subBuilder = useSubBuilder ? new StringBuilder() : builder; Object subVal = map.get(qtype); subBuilder = subBuilder.append("{!").append(qtype).append(' '); if (tagName != null) { subBuilder.append("tag=").append(tagName).append(' '); } buildLocalParams(subBuilder, subVal, false, additionalParams); subBuilder.append("}"); if (useSubBuilder) { builder.append('$').append(putParam(subBuilder.toString(), additionalParams)); } } else { for (Map.Entry<String, Object> entry : map.entrySet()) { String key = entry.getKey(); if (entry.getValue() instanceof List) { if (key.equals("query")) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error when parsing json query, value of query field should not be a list, found : " + entry.getValue()); } List l = (List) entry.getValue(); for (Object subVal : l) { builder.append(key).append("="); buildLocalParams(builder, subVal, true, additionalParams); builder.append(" "); } } else { if (key.equals("query")) { key = "v"; } builder.append(key).append("="); buildLocalParams(builder, entry.getValue(), true, additionalParams); builder.append(" "); } } } } }
1
31,732
Now that the `paramsPrefix` constant exists, the hardcoded "_tt" in putParam is redundant, I suppose.
apache-lucene-solr
java
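What the comment implies, sketched against the file above: once the patch introduces paramsPrefix = "_tt", putParam should reference the constant instead of repeating the literal:

private String putParam(String val, Map<String, String[]> additionalParams) {
    // Reuse the constant added by the patch rather than duplicating the "_tt" literal.
    String name = paramsPrefix + (numParams++);
    additionalParams.put(name, new String[]{val});
    return name;
}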
@@ -70,6 +70,8 @@ type ChooserList interface { // // peerlist.List and ListImplementation compose well with sharding schemes the // degenerate to returning the only available peer. +// +// Deprecated in favor of "go.uber.org/yarpc/peer/peerlist/v2".Implementation. type ListImplementation interface { transport.Lifecycle
1
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package peer import ( "context" "go.uber.org/yarpc/api/transport" ) // Chooser is a collection of Peers. Outbounds request peers from the // peer.Chooser to determine where to send requests. // The chooser is responsible for managing the lifecycle of any retained peers. type Chooser interface { transport.Lifecycle // Choose a Peer for the next call, block until a peer is available (or timeout) Choose(context.Context, *transport.Request) (peer Peer, onFinish func(error), err error) } // List listens to adds and removes of Peers from a peer list updater. // A Chooser will implement the List interface in order to receive // updates to the list of Peers it is keeping track of. type List interface { // Update performs the additions and removals to the Peer List Update(updates ListUpdates) error } // ListUpdates specifies the updates to be made to a List type ListUpdates struct { // Additions are the identifiers that should be added to the list Additions []Identifier // Removals are the identifiers that should be removed from the list Removals []Identifier } // ChooserList is both a Chooser and a List, useful for expressing both // capabilities of a single instance. type ChooserList interface { Chooser List } // ListImplementation is a collection of available peers, with its own // subscribers for peer status change notifications. // The available peer list encapsulates the logic for selecting from among // available peers, whereas a ChooserList is responsible for retaining, // releasing, and monitoring peer availability. // Use "go.uber.org/yarpc/peer/peerlist".List in conjunction with a // ListImplementation to produce a "go.uber.org/yarpc/api/peer".List. // // peerlist.List and ListImplementation compose well with sharding schemes that // degenerate to returning the only available peer. type ListImplementation interface { transport.Lifecycle Add(StatusPeer) Subscriber Remove(StatusPeer, Subscriber) // Choose must return an available peer under a list read lock, so must // not block. Choose(context.Context, *transport.Request) StatusPeer } // Binder is a callback for peer.Bind that accepts a peer list and binds it to // a peer list updater for the duration of the returned peer list updater. // The peer list updater must implement the lifecycle interface, and start and // stop updates over that lifecycle.
// The binder must not block on updating the list, because update may block // until the peer list has started. // The binder must return a peer list updater that will begin updating when it // starts, and stop updating when it stops. type Binder func(List) transport.Lifecycle
1
17,234
nit: The format recognized by tooling is `// Deprecated: [..]`, so you likely want this to be: // Deprecated: Use "go.uber.org/yarpc/peer/peerlist/v2".Implementation instead.
yarpc-yarpc-go
go
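A sketch of the wording the nit suggests; godoc and linters such as staticcheck only recognize the deprecation when the comment paragraph begins with the literal "Deprecated: " prefix:

// ListImplementation is a collection of available peers, with its own
// subscribers for peer status change notifications.
//
// Deprecated: Use "go.uber.org/yarpc/peer/peerlist/v2".Implementation instead.
type ListImplementation interface {
	transport.Lifecycle

	Add(StatusPeer) Subscriber
	Remove(StatusPeer, Subscriber)

	// Choose must return an available peer under a list read lock, so must
	// not block.
	Choose(context.Context, *transport.Request) StatusPeer
}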
@@ -41,9 +41,9 @@ func lookupTelemetryEndpoint(cfg config.Local, genesisNetwork protocol.NetworkID for _, bootstrapID := range bootstrapArray { addrs, err := ReadFromSRV("telemetry", bootstrapID, cfg.FallbackDNSResolverAddress) if err != nil { - log.Warnf("An issue occurred reading telemetry entry for: %s", bootstrapID) + log.Infof("An issue occurred reading telemetry entry for: %s", bootstrapID) } else if len(addrs) == 0 { - log.Warnf("No telemetry entry for: %s", bootstrapID) + log.Infof("No telemetry entry for: %s", bootstrapID) } else { return addrs[0] }
1
package network import ( "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" ) // StartTelemetryURIUpdateService starts a go routine which queries SRV records for a telemetry URI every <interval> func StartTelemetryURIUpdateService(interval time.Duration, cfg config.Local, genesisNetwork protocol.NetworkID, log logging.Logger, abort chan struct{}) { go func() { ticker := time.NewTicker(interval) defer ticker.Stop() updateTelemetryURI := func() { endpoint := lookupTelemetryEndpoint(cfg, genesisNetwork, log) if endpoint != "" && endpoint != log.GetTelemetryURI() { log.UpdateTelemetryURI(endpoint) } } // Update telemetry right away, followed by once every <interval> updateTelemetryURI() for { select { case <-ticker.C: updateTelemetryURI() case <-abort: return } } }() } func lookupTelemetryEndpoint(cfg config.Local, genesisNetwork protocol.NetworkID, log logging.Logger) string { bootstrapArray := cfg.DNSBootstrapArray(genesisNetwork) bootstrapArray = append(bootstrapArray, "default.algodev.network") for _, bootstrapID := range bootstrapArray { addrs, err := ReadFromSRV("telemetry", bootstrapID, cfg.FallbackDNSResolverAddress) if err != nil { log.Warnf("An issue occurred reading telemetry entry for: %s", bootstrapID) } else if len(addrs) == 0 { log.Warnf("No telemetry entry for: %s", bootstrapID) } else { return addrs[0] } } log.Warn("No telemetry endpoint was found.") return "" }
1
36,909
nit: 1. When formatting input strings, make sure to place them in quotes so we can identify whitespace issues, i.e. '%s'. 2. If there is an untyped, non-nil error, you want to log the error string as well.
algorand-go-algorand
go
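A sketch applying both nits on top of the patch's Warnf-to-Infof change: quote the interpolated bootstrap ID so whitespace problems are visible, and include the error value in the first message:

if err != nil {
	log.Infof("An issue occurred reading telemetry entry for '%s': %v", bootstrapID, err)
} else if len(addrs) == 0 {
	log.Infof("No telemetry entry for: '%s'", bootstrapID)
} else {
	return addrs[0]
}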
@@ -4,7 +4,16 @@ namespace Datadog.Trace.ClrProfiler { internal static class NativeMethods { - [DllImport("Datadog.Trace.ClrProfiler.Native.dll")] - public static extern bool IsProfilerAttached(); + public static class Windows + { + [DllImport("Datadog.Trace.ClrProfiler.Native.dll")] + public static extern bool IsProfilerAttached(); + } + + public static class Linux + { + [DllImport("Datadog.Trace.ClrProfiler.Native.so")] + public static extern bool IsProfilerAttached(); + } } }
1
using System.Runtime.InteropServices; namespace Datadog.Trace.ClrProfiler { internal static class NativeMethods { [DllImport("Datadog.Trace.ClrProfiler.Native.dll")] public static extern bool IsProfilerAttached(); } }
1
14,974
I believe this can be fixed with the original code if you just omit the ".dll" file extension so it reads `[DllImport("Datadog.Trace.ClrProfiler.Native")]`. On Windows it would look for `Datadog.Trace.ClrProfiler.Native.dll`, and on Linux/Mac it would look for `Datadog.Trace.ClrProfiler.Native.so`.
DataDog-dd-trace-dotnet
.cs
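A sketch of the reviewer's alternative, which keeps a single declaration: with no file extension, the .NET runtime probes for the platform-appropriate library name at load time:

using System.Runtime.InteropServices;

namespace Datadog.Trace.ClrProfiler
{
    internal static class NativeMethods
    {
        // Resolves to Datadog.Trace.ClrProfiler.Native.dll on Windows and
        // Datadog.Trace.ClrProfiler.Native.so on Linux.
        [DllImport("Datadog.Trace.ClrProfiler.Native")]
        public static extern bool IsProfilerAttached();
    }
}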
@@ -71,6 +71,11 @@ class UpdateManager { UserPreferences.setEpisodeCleanupValue(oldValueInDays * 24); } // else 0 or special negative values, no change needed } + if (oldVersionCode < 1070197) { + if (prefs.getBoolean(UserPreferences.PREF_MOBILE_UPDATE_OLD, false)) { + prefs.edit().putString(UserPreferences.PREF_MOBILE_UPDATE, "everything").apply(); + } + } } }
1
package de.danoeh.antennapod.core; import android.content.Context; import android.content.SharedPreferences; import android.content.pm.PackageInfo; import android.content.pm.PackageManager; import android.os.Build; import android.util.Log; import org.antennapod.audio.MediaPlayer; import de.danoeh.antennapod.core.preferences.UserPreferences; /* * This class's job is to perform maintenance tasks whenever the app has been updated */ class UpdateManager { private UpdateManager(){} private static final String TAG = UpdateManager.class.getSimpleName(); private static final String PREF_NAME = "app_version"; private static final String KEY_VERSION_CODE = "version_code"; private static int currentVersionCode; private static Context context; private static SharedPreferences prefs; public static void init(Context context) { UpdateManager.context = context; prefs = context.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE); PackageManager pm = context.getPackageManager(); try { PackageInfo info = pm.getPackageInfo(context.getPackageName(), 0); currentVersionCode = info.versionCode; } catch (PackageManager.NameNotFoundException e) { Log.e(TAG, "Failed to obtain package info for package name: " + context.getPackageName(), e); currentVersionCode = 0; return; } final int oldVersionCode = getStoredVersionCode(); Log.d(TAG, "old: " + oldVersionCode + ", current: " + currentVersionCode); if(oldVersionCode < currentVersionCode) { onUpgrade(oldVersionCode, currentVersionCode); setCurrentVersionCode(); } } private static int getStoredVersionCode() { return prefs.getInt(KEY_VERSION_CODE, -1); } private static void setCurrentVersionCode() { prefs.edit().putInt(KEY_VERSION_CODE, currentVersionCode).apply(); } private static void onUpgrade(final int oldVersionCode, final int newVersionCode) { if (oldVersionCode < 1050004) { if(MediaPlayer.isPrestoLibraryInstalled(context) && Build.VERSION.SDK_INT >= 16) { UserPreferences.enableSonic(); } } if (oldVersionCode < 1070196) { // migrate episode cleanup value (unit changed from days to hours) int oldValueInDays = UserPreferences.getEpisodeCleanupValue(); if (oldValueInDays > 0) { UserPreferences.setEpisodeCleanupValue(oldValueInDays * 24); } // else 0 or special negative values, no change needed } } }
1
14,658
Couldn't we just read the boolean as a string (I would assume that this returns "true" or "false"), migrate it to its new value, and keep using the same pref key?
AntennaPod-AntennaPod
java
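A sketch of the reviewer's question, assuming the old boolean and the new string setting could share the key UserPreferences.PREF_MOBILE_UPDATE. One caveat with the "read the boolean as a string" idea: SharedPreferences.getString() throws a ClassCastException when the stored value is actually a Boolean, so the old value still has to be read with getBoolean() before the key is rewritten:

if (oldVersionCode < 1070197) {
    // Read the legacy boolean, then overwrite the same key with the new string value.
    boolean oldMobileUpdate = prefs.getBoolean(UserPreferences.PREF_MOBILE_UPDATE, false);
    if (oldMobileUpdate) {
        prefs.edit().putString(UserPreferences.PREF_MOBILE_UPDATE, "everything").apply();
    }
}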
@@ -126,6 +126,7 @@ static int skip_file_check = 0; static int cache_mode = 1; +static int json_input = 0; static int jx_input = 0; static char *jx_context = NULL;
1
/* Copyright (C) 2008- The University of Notre Dame This software is distributed under the GNU General Public License. See the file COPYING for details. */ #include "auth_all.h" #include "auth_ticket.h" #include "batch_job.h" #include "cctools.h" #include "copy_stream.h" #include "create_dir.h" #include "debug.h" #include "getopt_aux.h" #include "hash_table.h" #include "int_sizes.h" #include "itable.h" #include "link.h" #include "list.h" #include "load_average.h" #include "macros.h" #include "path.h" #include "random.h" #include "rmonitor.h" #include "stringtools.h" #include "work_queue.h" #include "work_queue_catalog.h" #include "xxmalloc.h" #include "jx.h" #include "jx_print.h" #include "jx_parse.h" #include "jx_eval.h" #include "create_dir.h" #include "sha1.h" #include "dag.h" #include "dag_visitors.h" #include "parser.h" #include "parser_jx.h" #include "makeflow_summary.h" #include "makeflow_gc.h" #include "makeflow_log.h" #include "makeflow_wrapper.h" #include "makeflow_wrapper_docker.h" #include "makeflow_wrapper_monitor.h" #include "makeflow_wrapper_umbrella.h" #include "makeflow_mounts.h" #include "makeflow_wrapper_enforcement.h" #include "makeflow_wrapper_singularity.h" #include "makeflow_archive.h" #include "makeflow_catalog_reporter.h" #include <fcntl.h> #include <sys/stat.h> #include <sys/types.h> #include <libgen.h> #include <assert.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> /* Code organization notes: - The modules dag/dag_node/dag_file etc contain the data structures that represent the dag structure by itself. Functions named dag_*() create and manipulate those data structures, but do not execute the dag itself. These are shared between makeflow and other tools that read and manipulate the dag, like makeflow_viz, makeflow_linker, and so forth. - The modules makeflow/makeflow_log/makeflow_gc etc contain the functions that execute the dag by invoking batch operations, processing the log, etc. These are all functions named makeflow_*() to distinguish them from dag_*(). - The separation between dag structure and execution state is imperfect, because some of the execution state (note states, node counts, etc) is stored in struct dag and struct dag_node. Perhaps this can be improved. - All operations on files should use the batch_fs_*() functions, rather than invoking Unix I/O directly. This is because some batch systems (Hadoop, Confuga, etc) also include the storage where the files to be accessed are located. - APIs like work_queue_* should be indirectly accessed by setting options in Batch Job using batch_queue_set_option. See batch_job_work_queue.c for an example. */ #define MAX_REMOTE_JOBS_DEFAULT 100 static sig_atomic_t makeflow_abort_flag = 0; static int makeflow_failed_flag = 0; static int makeflow_submit_timeout = 3600; static int makeflow_retry_flag = 0; static int makeflow_retry_max = 5; /* makeflow_gc_method indicates the type of garbage collection * indicated by the user. 
Refer to makeflow_gc.h for specifics */ static makeflow_gc_method_t makeflow_gc_method = MAKEFLOW_GC_NONE; /* Disk size at which point GC is run */ static uint64_t makeflow_gc_size = 0; /* # of files after which GC is run */ static int makeflow_gc_count = -1; /* Iterations of wait loop prior to GC check */ static int makeflow_gc_barrier = 1; /* Determines next gc_barrier to make checks less frequent with large number of tasks */ static double makeflow_gc_task_ratio = 0.05; static batch_queue_type_t batch_queue_type = BATCH_QUEUE_TYPE_LOCAL; static struct batch_queue *local_queue = 0; static struct batch_queue *remote_queue = 0; static int local_jobs_max = 1; static int remote_jobs_max = MAX_REMOTE_JOBS_DEFAULT; static char *project = NULL; static int port = 0; static int output_len_check = 0; static int skip_file_check = 0; static int cache_mode = 1; static int jx_input = 0; static char *jx_context = NULL; static container_mode_t container_mode = CONTAINER_MODE_NONE; static char *container_image = NULL; static char *container_image_tar = NULL; static char *parrot_path = "./parrot_run"; /* Wait up to this many seconds for an output file of a successful task to appear on the local filesystem (e.g., to deal with NFS semantics). */ static int file_creation_patience_wait_time = 0; /* Write a verbose transaction log with SYMBOL tags. SYMBOLs are category labels (SYMBOLs should be deprecated once weaver/pbui tools are updated.) */ static int log_verbose_mode = 0; static struct makeflow_wrapper *wrapper = 0; static struct makeflow_monitor *monitor = 0; static struct makeflow_wrapper *enforcer = 0; static struct makeflow_wrapper_umbrella *umbrella = 0; static int catalog_reporting_on = 0; static char *mountfile = NULL; static char *mount_cache = NULL; static int use_mountfile = 0; static struct list *shared_fs_list = NULL; static int did_find_archived_job = 0; /* Generates file list for node based on node files, wrapper input files, and monitor input files. Relies on %% nodeid replacement for monitor file names. */ static struct list *makeflow_generate_input_files( struct dag_node *n ) { struct list *result = list_duplicate(n->source_files); if(wrapper) result = makeflow_wrapper_generate_files(result, wrapper->input_files, n, wrapper); if(enforcer) result = makeflow_wrapper_generate_files(result, enforcer->input_files, n, enforcer); if(umbrella) result = makeflow_wrapper_generate_files(result, umbrella->wrapper->input_files, n, umbrella->wrapper); if(monitor) result = makeflow_wrapper_generate_files(result, monitor->wrapper->input_files, n, monitor->wrapper); return result; } static struct list *makeflow_generate_output_files( struct dag_node *n ) { struct list *result = list_duplicate(n->target_files); if(wrapper) result = makeflow_wrapper_generate_files(result, wrapper->output_files, n, wrapper); if(enforcer) result = makeflow_wrapper_generate_files(result, enforcer->output_files, n, enforcer); if(umbrella) result = makeflow_wrapper_generate_files(result, umbrella->wrapper->output_files, n, umbrella->wrapper); if(monitor) result = makeflow_wrapper_generate_files(result, monitor->wrapper->output_files, n, monitor->wrapper); return result; } /* Abort one job in a given batch queue.
*/ static void makeflow_abort_job( struct dag *d, struct dag_node *n, struct batch_queue *q, UINT64_T jobid, const char *name ) { printf("aborting %s job %" PRIu64 "\n", name, jobid); batch_job_remove(q, jobid); makeflow_log_state_change(d, n, DAG_NODE_STATE_ABORTED); struct list *outputs = makeflow_generate_output_files(n); struct dag_file *f; list_first_item(outputs); while((f = list_next_item(outputs))) makeflow_clean_file(d, q, f, 0); makeflow_clean_node(d, q, n, 1); } /* Abort the dag by removing all batch jobs from all queues. */ static void makeflow_abort_all(struct dag *d) { UINT64_T jobid; struct dag_node *n; printf("got abort signal...\n"); itable_firstkey(d->local_job_table); while(itable_nextkey(d->local_job_table, &jobid, (void **) &n)) { makeflow_abort_job(d,n,local_queue,jobid,"local"); } itable_firstkey(d->remote_job_table); while(itable_nextkey(d->remote_job_table, &jobid, (void **) &n)) { makeflow_abort_job(d,n,remote_queue,jobid,"remote"); } } static void makeflow_node_force_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n); /* Decide whether to rerun a node based on batch and file system status. The silent option was added for to prevent confusing debug output when in clean mode. When clean_mode is not NONE we silence the node reseting output. */ void makeflow_node_decide_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n, int silent) { struct dag_file *f; if(itable_lookup(rerun_table, n->nodeid)) return; // Below are a bunch of situations when a node has to be rerun. // If a job was submitted to Condor, then just reconnect to it. if(n->state == DAG_NODE_STATE_RUNNING && !(n->local_job && local_queue) && batch_queue_type == BATCH_QUEUE_TYPE_CONDOR) { // Reconnect the Condor jobs if(!silent) fprintf(stderr, "rule still running: %s\n", n->command); itable_insert(d->remote_job_table, n->jobid, n); // Otherwise, we cannot reconnect to the job, so rerun it } else if(n->state == DAG_NODE_STATE_RUNNING || n->state == DAG_NODE_STATE_FAILED || n->state == DAG_NODE_STATE_ABORTED) { if(!silent) fprintf(stderr, "will retry failed rule: %s\n", n->command); goto rerun; } // Rerun if an input file has been updated since the last execution. list_first_item(n->source_files); while((f = list_next_item(n->source_files))) { if(dag_file_should_exist(f)) { continue; } else { if(!f->created_by) { if(!silent) fprintf(stderr, "makeflow: input file %s does not exist and is not created by any rule.\n", f->filename); exit(1); } else { /* If input file is missing, but node completed and file was garbage, then avoid rerunning. */ if(n->state == DAG_NODE_STATE_COMPLETE && f->state == DAG_FILE_STATE_DELETE) { continue; } goto rerun; } } } // Rerun if an output file is missing. list_first_item(n->target_files); while((f = list_next_item(n->target_files))) { if(dag_file_should_exist(f)) continue; /* If output file is missing, but node completed and file was gc'ed, then avoid rerunning. */ if(n->state == DAG_NODE_STATE_COMPLETE && f->state == DAG_FILE_STATE_DELETE) continue; goto rerun; } // Do not rerun this node return; rerun: makeflow_node_force_rerun(rerun_table, d, n); } /* Reset all state to cause a node to be re-run. 
*/ void makeflow_node_force_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n) { struct dag_node *p; struct dag_file *f1; struct dag_file *f2; int child_node_found; if(itable_lookup(rerun_table, n->nodeid)) return; // Mark this node as having been rerun already itable_insert(rerun_table, n->nodeid, n); // Remove running batch jobs if(n->state == DAG_NODE_STATE_RUNNING) { if(n->local_job && local_queue) { batch_job_remove(local_queue, n->jobid); itable_remove(d->local_job_table, n->jobid); } else { batch_job_remove(remote_queue, n->jobid); itable_remove(d->remote_job_table, n->jobid); } } // Clean up things associated with this node struct list *outputs = makeflow_generate_output_files(n); list_first_item(outputs); while((f1 = list_next_item(outputs))) makeflow_clean_file(d, remote_queue, f1, 0); makeflow_clean_node(d, remote_queue, n, 0); makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING); // For each parent node, rerun it if input file was garbage collected list_first_item(n->source_files); while((f1 = list_next_item(n->source_files))) { if(dag_file_should_exist(f1)) continue; p = f1->created_by; if(p) { makeflow_node_force_rerun(rerun_table, d, p); f1->reference_count += 1; } } // For each child node, rerun it list_first_item(n->target_files); while((f1 = list_next_item(n->target_files))) { for(p = d->nodes; p; p = p->next) { child_node_found = 0; list_first_item(p->source_files); while((f2 = list_next_item(p->source_files))) { if(!strcmp(f1->filename, f2->filename)) { child_node_found = 1; break; } } if(child_node_found) { makeflow_node_force_rerun(rerun_table, d, p); } } } } /* Update nested jobs with appropriate number of local jobs (total local jobs max / maximum number of concurrent nests). */ static void makeflow_prepare_nested_jobs(struct dag *d) { int dag_nested_width = dag_width(d, 1); int update_dag_nests = 1; char *s = getenv("MAKEFLOW_UPDATE_NESTED_JOBS"); if(s) update_dag_nests = atoi(s); if(dag_nested_width > 0 && update_dag_nests) { dag_nested_width = MIN(dag_nested_width, local_jobs_max); struct dag_node *n; for(n = d->nodes; n; n = n->next) { if(n->nested_job && ((n->local_job && local_queue) || batch_queue_type == BATCH_QUEUE_TYPE_LOCAL)) { char *command = xxmalloc(strlen(n->command) + 20); sprintf(command, "%s -j %d", n->command, local_jobs_max / dag_nested_width); free((char *) n->command); n->command = command; } } } } /* Match a filename (/home/fred) to a path stem (/home). Returns 0 on match, non-zero otherwise. */ static int prefix_match(void *stem, const void *filename) { assert(stem); assert(filename); return strncmp(stem, filename, strlen(stem)); } /* Returns true if the given filename is located in a shared filesystem, as given by the shared_fs_list. */ static int makeflow_file_on_sharedfs( const char *filename ) { return !list_iterate(shared_fs_list,prefix_match,filename); } /* Given a file, return the string that identifies it appropriately for the given batch system, combining the local and remote name and making substitutions according to the node.
*/ static char * makeflow_file_format( struct dag_node *n, struct dag_file *f, struct batch_queue *queue ) { const char *remotename = dag_node_get_remote_name(n, f->filename); if(!remotename && wrapper) remotename = makeflow_wrapper_get_remote_name(wrapper, n->d, f->filename); if(!remotename && enforcer) remotename = makeflow_wrapper_get_remote_name(enforcer, n->d, f->filename); if(!remotename && monitor) remotename = makeflow_wrapper_get_remote_name(monitor->wrapper, n->d, f->filename); if(!remotename && umbrella) remotename = makeflow_wrapper_get_remote_name(umbrella->wrapper, n->d, f->filename); if(!remotename) remotename = f->filename; switch (batch_queue_get_type(queue)) { case BATCH_QUEUE_TYPE_WORK_QUEUE: return string_format("%s=%s,", f->filename, remotename); default: return string_format("%s,", f->filename); } } /* Given a list of files, add the files to the given string. Returns the original string, realloced if necessary */ static char * makeflow_file_list_format( struct dag_node *node, char *file_str, struct list *file_list, struct batch_queue *queue ) { struct dag_file *file; if(!file_str) file_str = strdup(""); if(!file_list) return file_str; list_first_item(file_list); while((file=list_next_item(file_list))) { if (makeflow_file_on_sharedfs(file->filename)) { debug(D_MAKEFLOW_RUN, "Skipping file %s on shared fs\n", file->filename); continue; } char *f = makeflow_file_format(node,file,queue); file_str = string_combine(file_str,f); free(f); } return file_str; } /* Submit one fully formed job, retrying failures up to the makeflow_submit_timeout. This is necessary because busy batch systems occasionally do not accept a job submission. */ static batch_job_id_t makeflow_node_submit_retry( struct batch_queue *queue, const char *command, const char *input_files, const char *output_files, struct jx *envlist, const struct rmsummary *resources) { time_t stoptime = time(0) + makeflow_submit_timeout; int waittime = 1; batch_job_id_t jobid = 0; /* Display the fully elaborated command, just like Make does. */ printf("submitting job: %s\n", command); while(1) { jobid = batch_job_submit(queue, command, input_files, output_files, envlist, resources); if(jobid >= 0) { printf("submitted job %"PRIbjid"\n", jobid); return jobid; } fprintf(stderr, "couldn't submit batch job, still trying...\n"); if(makeflow_abort_flag) break; if(time(0) > stoptime) { fprintf(stderr, "unable to submit job after %d seconds!\n", makeflow_submit_timeout); break; } sleep(waittime); waittime *= 2; if(waittime > 60) waittime = 60; } return 0; } /* Expand a dag_node into a text list of input files, output files, and a command, by applying all wrappers and settings. Used at both job submission and completion to obtain identical strings. */ static void makeflow_node_expand( struct dag_node *n, struct batch_queue *queue, struct list **input_list, struct list **output_list, char **input_files, char **output_files, char **command ) { makeflow_wrapper_umbrella_set_input_files(umbrella, queue, n); if (*input_list == NULL) { *input_list = makeflow_generate_input_files(n); } if (*output_list == NULL) { *output_list = makeflow_generate_output_files(n); } /* Create strings for all the files mentioned by this node. 
*/ *input_files = makeflow_file_list_format(n, 0, *input_list, queue); *output_files = makeflow_file_list_format(n, 0, *output_list, queue); /* Expand the command according to each of the wrappers */ *command = strdup(n->command); *command = makeflow_wrap_wrapper(*command, n, wrapper); *command = makeflow_wrap_enforcer(*command, n, enforcer, *input_list, *output_list); *command = makeflow_wrap_umbrella(*command, n, umbrella, queue, *input_files, *output_files); *command = makeflow_wrap_monitor(*command, n, queue, monitor); } /* Submit a node to the appropriate batch system, after materializing the necessary list of input and output files, and applying all wrappers and options. */ static void makeflow_node_submit(struct dag *d, struct dag_node *n) { struct batch_queue *queue; struct dag_file *f; struct list *input_list = NULL, *output_list = NULL; char *input_files = NULL, *output_files = NULL, *command = NULL; if(n->local_job && local_queue) { queue = local_queue; } else { queue = remote_queue; } makeflow_node_expand(n, queue, &input_list, &output_list, &input_files, &output_files, &command); /* Before setting the batch job options (stored in the "BATCH_OPTIONS" * variable), we must save the previous global queue value, and then * restore it after we submit. */ struct dag_variable_lookup_set s = { d, n->category, n, NULL }; char *batch_options = dag_variable_lookup_string("BATCH_OPTIONS", &s); char *previous_batch_options = NULL; if(batch_queue_get_option(queue, "batch-options")) previous_batch_options = xxstrdup(batch_queue_get_option(queue, "batch-options")); if(batch_options) { debug(D_MAKEFLOW_RUN, "Batch options: %s\n", batch_options); batch_queue_set_option(queue, "batch-options", batch_options); free(batch_options); } batch_queue_set_int_option(queue, "task-id", n->nodeid); /* Generate the environment vars specific to this node. */ struct jx *envlist = dag_node_env_create(d,n); /* Logs the creation of output files. */ makeflow_log_file_list_state_change(d,output_list,DAG_FILE_STATE_EXPECT); /* check archiving directory to see if node has already been preserved */ if (d->should_read_archive && makeflow_archive_is_preserved(d, n, command, input_list, output_list)) { printf("node %d already exists in archive, replicating output files\n", n->nodeid); /* copy archived files to working directory and update state for node and dag_files */ makeflow_archive_copy_preserved_files(d, n, output_list); n->state = DAG_NODE_STATE_RUNNING; list_first_item(n->target_files); while((f = list_next_item(n->target_files))) { makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXISTS); } makeflow_log_state_change(d, n, DAG_NODE_STATE_COMPLETE); did_find_archived_job = 1; } else { /* Now submit the actual job, retrying failures as needed. */ n->jobid = makeflow_node_submit_retry(queue,command,input_files,output_files,envlist, dag_node_dynamic_label(n)); /* Update all of the necessary data structures. */ if(n->jobid >= 0) { makeflow_log_state_change(d, n, DAG_NODE_STATE_RUNNING); if(n->local_job && local_queue) { itable_insert(d->local_job_table, n->jobid, n); } else { itable_insert(d->remote_job_table, n->jobid, n); } } else { makeflow_log_state_change(d, n, DAG_NODE_STATE_FAILED); makeflow_failed_flag = 1; } } /* Restore old batch job options. 
*/ if(previous_batch_options) { batch_queue_set_option(queue, "batch-options", previous_batch_options); free(previous_batch_options); } free(command); list_delete(input_list); list_delete(output_list); free(input_files); free(output_files); jx_delete(envlist); } static int makeflow_node_ready(struct dag *d, struct dag_node *n) { struct dag_file *f; if(n->state != DAG_NODE_STATE_WAITING) return 0; if(n->local_job && local_queue) { if(dag_local_jobs_running(d) >= local_jobs_max) return 0; } else { if(dag_remote_jobs_running(d) >= remote_jobs_max) return 0; } list_first_item(n->source_files); while((f = list_next_item(n->source_files))) { if(dag_file_should_exist(f)) { continue; } else { return 0; } } return 1; } /* Find all jobs ready to be run, then submit them. */ static void makeflow_dispatch_ready_jobs(struct dag *d) { struct dag_node *n; for(n = d->nodes; n; n = n->next) { if(dag_remote_jobs_running(d) >= remote_jobs_max && dag_local_jobs_running(d) >= local_jobs_max) { break; } if(makeflow_node_ready(d, n)) { makeflow_node_submit(d, n); } } } /* Check the the indicated file was created and log, error, or retry as appropriate. */ int makeflow_node_check_file_was_created(struct dag_node *n, struct dag_file *f) { struct stat buf; int file_created = 0; int64_t start_check = time(0); while(!file_created) { if(batch_fs_stat(remote_queue, f->filename, &buf) < 0) { fprintf(stderr, "%s did not create file %s\n", n->command, f->filename); } else if(output_len_check && buf.st_size <= 0) { debug(D_MAKEFLOW_RUN, "%s created a file of length %ld\n", n->command, (long) buf.st_size); } else { /* File was created and has length larger than zero. */ debug(D_MAKEFLOW_RUN, "File %s created by rule %d.\n", f->filename, n->nodeid); f->actual_size = buf.st_size; makeflow_log_file_state_change(n->d, f, DAG_FILE_STATE_EXISTS); file_created = 1; break; } if(file_creation_patience_wait_time > 0 && time(0) - start_check < file_creation_patience_wait_time) { /* Failed to see the file. Sleep and try again. */ debug(D_MAKEFLOW_RUN, "Checking again for file %s.\n", f->filename); sleep(1); } else { /* Failed was not seen by makeflow in the aloted tries. */ debug(D_MAKEFLOW_RUN, "File %s was not created by rule %d.\n", f->filename, n->nodeid); file_created = 0; break; } } return file_created; } /* Mark the given task as completing, using the batch_job_info completion structure provided by batch_job. 
*/ static void makeflow_node_complete(struct dag *d, struct dag_node *n, struct batch_queue *queue, struct batch_job_info *info) { struct dag_file *f; int job_failed = 0; int monitor_retried = 0; if(n->state != DAG_NODE_STATE_RUNNING) return; if(monitor) { char *nodeid = string_format("%d",n->nodeid); char *output_prefix = NULL; if(batch_queue_supports_feature(queue, "output_directories") || n->local_job) { output_prefix = xxstrdup(monitor->log_prefix); } else { output_prefix = xxstrdup(path_basename(monitor->log_prefix)); } char *log_name_prefix = string_replace_percents(output_prefix, nodeid); char *summary_name = string_format("%s.summary", log_name_prefix); if(n->resources_measured) rmsummary_delete(n->resources_measured); n->resources_measured = rmsummary_parse_file_single(summary_name); category_accumulate_summary(n->category, n->resources_measured, NULL); makeflow_monitor_move_output_if_needed(n, queue, monitor); free(nodeid); free(log_name_prefix); free(summary_name); } struct list *outputs = makeflow_generate_output_files(n); if(info->disk_allocation_exhausted) { job_failed = 1; } else if(info->exited_normally && info->exit_code == 0) { list_first_item(outputs); while((f = list_next_item(outputs))) { if(!makeflow_node_check_file_was_created(n, f)) { job_failed = 1; } } } else { if(info->exited_normally) { fprintf(stderr, "%s failed with exit code %d\n", n->command, info->exit_code); } else { fprintf(stderr, "%s crashed with signal %d (%s)\n", n->command, info->exit_signal, strsignal(info->exit_signal)); } job_failed = 1; } if(job_failed) { makeflow_log_state_change(d, n, DAG_NODE_STATE_FAILED); /* Clean files created in node. Clean existing and expected and record deletion. */ list_first_item(outputs); while((f = list_next_item(outputs))) { if(f->state == DAG_FILE_STATE_EXPECT) { makeflow_clean_file(d, remote_queue, f, 1); } else { makeflow_clean_file(d, remote_queue, f, 0); } } if(info->disk_allocation_exhausted) { fprintf(stderr, "\nrule %d failed because it exceeded its loop device allocation capacity.\n", n->nodeid); if(n->resources_measured) { rmsummary_print(stderr, n->resources_measured, /* pprint */ 0, /* extra fields */ NULL); fprintf(stderr, "\n"); } category_allocation_t next = category_next_label(n->category, n->resource_request, /* resource overflow */ 1, n->resources_requested, n->resources_measured); if(next != CATEGORY_ALLOCATION_ERROR) { debug(D_MAKEFLOW_RUN, "Rule %d resubmitted using new resource allocation.\n", n->nodeid); n->resource_request = next; fprintf(stderr, "\nrule %d resubmitting with maximum resources.\n", n->nodeid); makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING); if(monitor) { monitor_retried = 1; } } } if(monitor && info->exit_code == RM_OVERFLOW) { debug(D_MAKEFLOW_RUN, "rule %d failed because it exceeded the resources limits.\n", n->nodeid); if(n->resources_measured && n->resources_measured->limits_exceeded) { char *str = rmsummary_print_string(n->resources_measured->limits_exceeded, 1); debug(D_MAKEFLOW_RUN, "%s", str); free(str); } category_allocation_t next = category_next_label(n->category, n->resource_request, /* resource overflow */ 1, n->resources_requested, n->resources_measured); if(next != CATEGORY_ALLOCATION_ERROR) { debug(D_MAKEFLOW_RUN, "Rule %d resubmitted using new resource allocation.\n", n->nodeid); n->resource_request = next; makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING); monitor_retried = 1; } } if(!monitor_retried) { if(makeflow_retry_flag || info->exit_code == 101) { n->failure_count++; if(n->failure_count 
> makeflow_retry_max) { notice(D_MAKEFLOW_RUN, "job %s failed too many times.", n->command); makeflow_failed_flag = 1; } else { notice(D_MAKEFLOW_RUN, "will retry failed job %s", n->command); makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING); } } else { makeflow_failed_flag = 1; } } else { makeflow_failed_flag = 1; } } else { /* Mark source files that have been used by this node */ list_first_item(n->source_files); while((f = list_next_item(n->source_files))) { f->reference_count+= -1; if(f->reference_count == 0 && f->state == DAG_FILE_STATE_EXISTS) makeflow_log_file_state_change(d, f, DAG_FILE_STATE_COMPLETE); } /* store node into archiving directory */ if (d->should_write_to_archive) { printf("archiving node within archiving directory\n"); struct list *input_list = NULL; char *input_files = NULL, *output_files = NULL, *command = NULL; makeflow_node_expand(n, queue, &input_list, &outputs, &input_files, &output_files, &command); makeflow_archive_populate(d, n, command, input_list, outputs, info); free(command); free(input_files); free(output_files); list_delete(input_list); } makeflow_log_state_change(d, n, DAG_NODE_STATE_COMPLETE); } list_delete(outputs); } /* Check the dag for consistency, and emit errors if input dependencies, etc are missing. */ static int makeflow_check(struct dag *d) { struct stat buf; struct dag_node *n; struct dag_file *f; int error = 0; debug(D_MAKEFLOW_RUN, "checking rules for consistency...\n"); for(n = d->nodes; n; n = n->next) { list_first_item(n->source_files); while((f = list_next_item(n->source_files))) { if(f->created_by) { continue; } if(skip_file_check || batch_fs_stat(remote_queue, f->filename, &buf) >= 0) { continue; } if(f->source) { continue; } fprintf(stderr, "makeflow: %s does not exist, and is not created by any rule.\n", f->filename); error++; } } if(error) { fprintf(stderr, "makeflow: found %d errors during consistency check.\n", error); return 0; } else { return 1; } } /* Used to check that features used are supported by the batch system. This would be where we added checking of selected options to verify they are supported by the batch system, such as work_queue specific options. */ static int makeflow_check_batch_consistency(struct dag *d) { struct dag_node *n; struct dag_file *f; int error = 0; debug(D_MAKEFLOW_RUN, "checking for consistency of batch system support...\n"); for(n = d->nodes; n; n = n->next) { if(itable_size(n->remote_names) > 0 || (wrapper && wrapper->uses_remote_rename)){ if(n->local_job) { debug(D_ERROR, "Remote renaming is not supported with -Tlocal or LOCAL execution. Rule %d.\n", n->nodeid); error = 1; break; } else if (!batch_queue_supports_feature(remote_queue, "remote_rename")) { debug(D_ERROR, "Remote renaming is not supported on selected batch system. Rule %d.\n", n->nodeid); error = 1; break; } } if(!batch_queue_supports_feature(remote_queue, "absolute_path") && !n->local_job){ list_first_item(n->source_files); while((f = list_next_item(n->source_files)) && !error) { if(makeflow_file_on_sharedfs(f->filename)) continue; const char *remotename = dag_node_get_remote_name(n, f->filename); if((remotename && *remotename == '/') || (*f->filename == '/' && !remotename)) { debug(D_ERROR, "Absolute paths are not supported on selected batch system. 
Rule %d.\n", n->nodeid); error = 1; break; } } list_first_item(n->target_files); while((f = list_next_item(n->target_files)) && !error) { if(makeflow_file_on_sharedfs(f->filename)) continue; const char *remotename = dag_node_get_remote_name(n, f->filename); if((remotename && *remotename == '/') || (*f->filename == '/' && !remotename)) { debug(D_ERROR, "Absolute paths are not supported on selected batch system. Rule %d.\n", n->nodeid); error = 1; break; } } } } if(error) { return 0; } else { return 1; } } /* Main loop for running a makeflow: submit jobs, wait for completion, keep going until everything done. */ static void makeflow_run( struct dag *d ) { struct dag_node *n; batch_job_id_t jobid; struct batch_job_info info; timestamp_t last_time = timestamp_get(); timestamp_t start = timestamp_get(); int first_report = 1; //reporting to catalog if(catalog_reporting_on){ makeflow_catalog_summary(d, project, batch_queue_type, start); } while(!makeflow_abort_flag) { did_find_archived_job = 0; makeflow_dispatch_ready_jobs(d); /* Due to the fact that archived jobs are never "run", no local or remote jobs are added to the remote or local job table if all ready jobs were found within the archive. Thus makeflow_dispatch_ready_jobs must run at least once more if an archived job was found. */ if(dag_local_jobs_running(d)==0 && dag_remote_jobs_running(d)==0 && did_find_archived_job == 0 ) break; if(dag_remote_jobs_running(d)) { int tmp_timeout = 5; jobid = batch_job_wait_timeout(remote_queue, &info, time(0) + tmp_timeout); if(jobid > 0) { printf("job %"PRIbjid" completed\n",jobid); debug(D_MAKEFLOW_RUN, "Job %" PRIbjid " has returned.\n", jobid); n = itable_remove(d->remote_job_table, jobid); if(n) makeflow_node_complete(d, n, remote_queue, &info); } } if(dag_local_jobs_running(d)) { time_t stoptime; int tmp_timeout = 5; if(dag_remote_jobs_running(d)) { stoptime = time(0); } else { stoptime = time(0) + tmp_timeout; } jobid = batch_job_wait_timeout(local_queue, &info, stoptime); if(jobid > 0) { debug(D_MAKEFLOW_RUN, "Job %" PRIbjid " has returned.\n", jobid); n = itable_remove(d->local_job_table, jobid); if(n) makeflow_node_complete(d, n, local_queue, &info); } } /* Make periodic report to catalog. */ timestamp_t now = timestamp_get(); if(catalog_reporting_on && (((now-last_time) > (60 * 1000 * 1000)) || first_report==1)){ //if we are in reporting mode, and if either it's our first report, or 1 min has transpired makeflow_catalog_summary(d, project,batch_queue_type,start); last_time = now; first_report = 0; } /* Rather than try to garbage collect after each time in this * wait loop, perform garbage collection after a proportional * amount of tasks have passed. */ makeflow_gc_barrier--; if(makeflow_gc_method != MAKEFLOW_GC_NONE && makeflow_gc_barrier == 0) { makeflow_gc(d, remote_queue, makeflow_gc_method, makeflow_gc_size, makeflow_gc_count); makeflow_gc_barrier = MAX(d->nodeid_counter * makeflow_gc_task_ratio, 1); } } /* Always make final report to catalog when workflow ends. */ if(catalog_reporting_on){ makeflow_catalog_summary(d, project,batch_queue_type,start); } if(makeflow_abort_flag) { makeflow_abort_all(d); } else { if(!makeflow_failed_flag && makeflow_gc_method != MAKEFLOW_GC_NONE) { makeflow_gc(d,remote_queue,MAKEFLOW_GC_ALL,0,0); } } } /* Signal handler to catch abort signals. Note that permissible actions in signal handlers are very limited, so we emit a message to the terminal and update a global variable noticed by makeflow_run. 
*/ static void handle_abort(int sig) { static int abort_count_to_exit = 5; abort_count_to_exit -= 1; int fd = open("/dev/tty", O_WRONLY); if (fd >= 0) { char buf[256]; snprintf(buf, sizeof(buf), "Received signal %d, will try to clean up remote resources. Send signal %d more times to force exit.\n", sig, abort_count_to_exit); write(fd, buf, strlen(buf)); close(fd); } if (abort_count_to_exit == 1) signal(sig, SIG_DFL); makeflow_abort_flag = 1; } static void set_archive_directory_string(char **archive_directory, char *option_arg) { if (*archive_directory != NULL) { // need to free archive directory to avoid memory leak since it has already been set once free(*archive_directory); } if (option_arg) { *archive_directory = xxstrdup(option_arg); } else { char *uid = xxmalloc(10); sprintf(uid, "%d", getuid()); *archive_directory = xxmalloc(sizeof(MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY) + 20 * sizeof(char)); sprintf(*archive_directory, "%s%s", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY, uid); free(uid); } } static void show_help_run(const char *cmd) { printf("Use: %s [options] <dagfile>\n", cmd); printf("Frequently used options:\n\n"); printf(" %-30s Clean up: remove logfile and all targets. Optional specification [intermediates, outputs, cache] removes only the indicated files.\n", "-c,--clean=<type>"); printf(" %-30s Batch system type: (default is local)\n", "-T,--batch-type=<type>"); printf(" %-30s %s\n\n", "", batch_queue_type_string()); printf("Other options are:\n"); printf(" %-30s Advertise the master information to a catalog server.\n", "-a,--advertise"); printf(" %-30s Specify path to Amazon credentials (for use with -T amazon)\n", "--amazon-credentials"); printf(" %-30s Specify amazon-ami (for use with -T amazon)\n", "--amazon-ami"); printf(" %-30s Disable the check for AFS. (experts only.)\n", "-A,--disable-afs-check"); printf(" %-30s Add these options to all batch submit files.\n", "-B,--batch-options=<options>"); printf(" %-30s Set catalog server to <catalog>. Format: HOSTNAME:PORT \n", "-C,--catalog-server=<catalog>"); printf(" %-30s Enable debugging for this subsystem\n", "-d,--debug=<subsystem>"); printf(" %-30s Write summary of workflow to this file upon success or failure.\n", "-f,--summary-log=<file>"); printf(" %-30s Work Queue fast abort multiplier. (default is deactivated)\n", "-F,--wq-fast-abort=<#>"); printf(" %-30s Show this help screen.\n", "-h,--help"); printf(" %-30s Max number of local jobs to run at once. (default is # of cores)\n", "-j,--max-local=<#>"); printf(" %-30s Max number of remote jobs to run at once.\n", "-J,--max-remote=<#>"); printf(" (default %d for -Twq, %d otherwise.)\n", 10*MAX_REMOTE_JOBS_DEFAULT, MAX_REMOTE_JOBS_DEFAULT ); printf(" %-30s Use this file for the makeflow log. (default is X.makeflowlog)\n", "-l,--makeflow-log=<logfile>"); printf(" %-30s Use this file for the batch system log. (default is X.<type>log)\n", "-L,--batch-log=<logfile>"); printf(" %-30s Send summary of workflow to this email address upon success or failure.\n", "-m,--email=<email>"); printf(" %-30s Use this file as a mountlist.\n", " --mounts=<mountfile>"); printf(" %-30s Use this dir as the cache for file dependencies.\n", " --cache=<cache_dir>"); printf(" %-30s Set the project name to <project>\n", "-N,--project-name=<project>"); printf(" %-30s Send debugging to this file. 
(can also be :stderr, :stdout, :syslog, or :journal)\n", "-o,--debug-file=<file>"); printf(" %-30s Rotate debug file once it reaches this size.\n", " --debug-rotate-max=<bytes>"); printf(" %-30s Password file for authenticating workers.\n", " --password"); printf(" %-30s Port number to use with Work Queue. (default is %d, 0=arbitrary)\n", "-p,--port=<port>", WORK_QUEUE_DEFAULT_PORT); printf(" %-30s Priority. The higher the value, the higher the priority.\n", "-P,--priority=<integer>"); printf(" %-30s Automatically retry failed batch jobs up to %d times.\n", "-R,--retry", makeflow_retry_max); printf(" %-30s Automatically retry failed batch jobs up to n times.\n", "-r,--retry-count=<n>"); printf(" %-30s Wait for output files to be created up to n seconds (e.g., to deal with NFS semantics).\n", " --wait-for-files-upto=<n>"); printf(" %-30s Time to retry failed batch job submission. (default is %ds)\n", "-S,--submission-timeout=<#>", makeflow_submit_timeout); printf(" %-30s Work Queue keepalive timeout. (default is %ds)\n", "-t,--wq-keepalive-timeout=<#>", WORK_QUEUE_DEFAULT_KEEPALIVE_TIMEOUT); printf(" %-30s Work Queue keepalive interval. (default is %ds)\n", "-u,--wq-keepalive-interval=<#>", WORK_QUEUE_DEFAULT_KEEPALIVE_INTERVAL); printf(" %-30s Umbrella binary for running every rule in a makeflow.\n", " --umbrella-binary=<file>"); printf(" %-30s Umbrella log file prefix for running every rule in a makeflow. (default is <makefilename>.umbrella.log)\n", " --umbrella-log-prefix=<string>"); printf(" %-30s Umbrella execution mode for running every rule in a makeflow. (default is local)\n", " --umbrella-mode=<mode>"); printf(" %-30s Umbrella spec for running every rule in a makeflow.\n", " --umbrella-spec=<file>"); printf(" %-30s Show version string\n", "-v,--version"); printf(" %-30s Work Queue scheduling algorithm. (time|files|fcfs)\n", "-W,--wq-schedule=<mode>"); printf(" %-30s Working directory for the batch system.\n", " --working-dir=<dir|url>"); printf(" %-30s Wrap all commands with this prefix.\n", " --wrapper=<cmd>"); printf(" %-30s Wrapper command requires this input file.\n", " --wrapper-input=<cmd>"); printf(" %-30s Wrapper command produces this output file.\n", " --wrapper-output=<cmd>"); printf(" %-30s Change directory: chdir to enable executing the Makefile in another directory.\n", "-X,--change-directory"); printf(" %-30s Force failure on zero-length output files.\n", "-z,--zero-length-error"); printf(" %-30s Select port at random and write it to this file.\n", "-Z,--port-file=<file>"); printf(" %-30s Disable batch system caching. (default is false)\n", " --disable-cache"); printf(" %-30s Add node id symbol tags in the makeflow log. (default is false)\n", " --log-verbose"); printf(" %-30s Run each task with a container based on this docker image.\n", "--docker=<image>"); printf(" %-30s Load docker image from the tar file.\n", "--docker-tar=<tar file>"); printf(" %-30s Indicate user trusts inputs exist.\n", "--skip-file-check"); printf(" %-30s Use Parrot to restrict access to the given inputs/outputs.\n", "--enforcement"); printf(" %-30s Path to parrot_run (defaults to current directory).\n", "--parrot-path=<path>"); printf(" %-30s Indicate preferred master connection. Choose one of by_ip or by_hostname. 
(default is by_ip)\n", "--work-queue-preferred-connection"); printf(" %-30s Use JX format rather than Make-style format for the input file.\n", "--jx"); printf(" %-30s Evaluate the JX input in the given context.\n", "--jx-context"); printf(" %-30s Wrap execution of all rules in a singularity container.\n","--singularity=<image>"); printf(" %-30s Assume the given directory is a shared filesystem accessible to all workers.\n", "--shared-fs"); printf(" %-30s Archive results of makeflow in specified directory (default directory is %s)\n", "--archive=<dir>", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY); printf(" %-30s Read/Use archived results of makeflow in specified directory, will not write to archive (default directory is %s)\n", "--archive-read=<dir>", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY); printf(" %-30s Write archived results of makeflow in specified directory, will not read/use archived data (default directory is %s)\n", "--archive-write=<dir>", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY); printf(" %-30s Indicate the host name of the preferred mesos master.\n", "--mesos-master=<hostname:port>"); printf(" %-30s Indicate the path to mesos python2 site-packages.\n", "--mesos-path=<path>"); printf(" %-30s Indicate the linking libraries for running mesos.\n", "--mesos-preload=<path>"); printf("\n*Monitor Options:\n\n"); printf(" %-30s Enable the resource monitor, and write the monitor logs to <dir>.\n", "--monitor=<dir>"); printf(" %-30s Set monitor interval to <#> seconds. (default is 1 second)\n", " --monitor-interval=<#>"); printf(" %-30s Enable monitor time series. (default is disabled)\n", " --monitor-with-time-series"); printf(" %-30s Enable monitoring of opened files. (default is disabled)\n", " --monitor-with-opened-files"); printf(" %-30s Format for monitor logs. 
(default %s)\n", " --monitor-log-fmt=<fmt>", DEFAULT_MONITOR_LOG_FORMAT); } int main(int argc, char *argv[]) { int c; const char *dagfile; char *change_dir = NULL; char *batchlogfilename = NULL; const char *batch_submit_options = getenv("BATCH_OPTIONS"); makeflow_clean_depth clean_mode = MAKEFLOW_CLEAN_NONE; char *email_summary_to = NULL; int explicit_remote_jobs_max = 0; int explicit_local_jobs_max = 0; char *logfilename = NULL; int port_set = 0; timestamp_t runtime = 0; int skip_afs_check = 0; int should_read_archive = 0; int should_write_to_archive = 0; timestamp_t time_completed = 0; const char *work_queue_keepalive_interval = NULL; const char *work_queue_keepalive_timeout = NULL; const char *work_queue_master_mode = "standalone"; const char *work_queue_port_file = NULL; double wq_option_fast_abort_multiplier = -1.0; const char *amazon_credentials = NULL; const char *amazon_ami = NULL; const char *priority = NULL; char *work_queue_password = NULL; char *wq_wait_queue_size = 0; int did_explicit_auth = 0; char *chirp_tickets = NULL; char *working_dir = NULL; char *work_queue_preferred_connection = NULL; char *write_summary_to = NULL; char *s; char *log_dir = NULL; char *log_format = NULL; char *archive_directory = NULL; category_mode_t allocation_mode = CATEGORY_ALLOCATION_MODE_FIXED; shared_fs_list = list_create(); char *mesos_master = "127.0.0.1:5050/"; char *mesos_path = NULL; char *mesos_preload = NULL; random_init(); debug_config(argv[0]); s = getenv("MAKEFLOW_BATCH_QUEUE_TYPE"); if(s) { batch_queue_type = batch_queue_type_from_string(s); if(batch_queue_type == BATCH_QUEUE_TYPE_UNKNOWN) { fprintf(stderr, "makeflow: unknown batch queue type: %s (from $MAKEFLOW_BATCH_QUEUE_TYPE)\n", s); return 1; } } s = getenv("WORK_QUEUE_MASTER_MODE"); if(s) { work_queue_master_mode = s; } s = getenv("WORK_QUEUE_NAME"); if(s) { project = xxstrdup(s); } s = getenv("WORK_QUEUE_FAST_ABORT_MULTIPLIER"); if(s) { wq_option_fast_abort_multiplier = atof(s); } enum { LONG_OPT_AUTH = UCHAR_MAX+1, LONG_OPT_CACHE, LONG_OPT_DEBUG_ROTATE_MAX, LONG_OPT_DISABLE_BATCH_CACHE, LONG_OPT_DOT_CONDENSE, LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME, LONG_OPT_GC_SIZE, LONG_OPT_MONITOR, LONG_OPT_MONITOR_INTERVAL, LONG_OPT_MONITOR_LOG_NAME, LONG_OPT_MONITOR_OPENED_FILES, LONG_OPT_MONITOR_TIME_SERIES, LONG_OPT_MOUNTS, LONG_OPT_PASSWORD, LONG_OPT_TICKETS, LONG_OPT_VERBOSE_PARSING, LONG_OPT_LOG_VERBOSE_MODE, LONG_OPT_WORKING_DIR, LONG_OPT_PREFERRED_CONNECTION, LONG_OPT_WQ_WAIT_FOR_WORKERS, LONG_OPT_WRAPPER, LONG_OPT_WRAPPER_INPUT, LONG_OPT_WRAPPER_OUTPUT, LONG_OPT_DOCKER, LONG_OPT_DOCKER_TAR, LONG_OPT_AMAZON_CREDENTIALS, LONG_OPT_AMAZON_AMI, LONG_OPT_JX, LONG_OPT_JX_CONTEXT, LONG_OPT_SKIP_FILE_CHECK, LONG_OPT_UMBRELLA_BINARY, LONG_OPT_UMBRELLA_LOG_PREFIX, LONG_OPT_UMBRELLA_MODE, LONG_OPT_UMBRELLA_SPEC, LONG_OPT_ALLOCATION_MODE, LONG_OPT_ENFORCEMENT, LONG_OPT_PARROT_PATH, LONG_OPT_SINGULARITY, LONG_OPT_SHARED_FS, LONG_OPT_ARCHIVE, LONG_OPT_ARCHIVE_READ_ONLY, LONG_OPT_ARCHIVE_WRITE_ONLY, LONG_OPT_MESOS_MASTER, LONG_OPT_MESOS_PATH, LONG_OPT_MESOS_PRELOAD }; static const struct option long_options_run[] = { {"advertise", no_argument, 0, 'a'}, {"allocation", required_argument, 0, LONG_OPT_ALLOCATION_MODE}, {"auth", required_argument, 0, LONG_OPT_AUTH}, {"batch-log", required_argument, 0, 'L'}, {"batch-options", required_argument, 0, 'B'}, {"batch-type", required_argument, 0, 'T'}, {"cache", required_argument, 0, LONG_OPT_CACHE}, {"catalog-server", required_argument, 0, 'C'}, {"clean", optional_argument, 0, 'c'}, {"debug", 
required_argument, 0, 'd'}, {"debug-file", required_argument, 0, 'o'}, {"debug-rotate-max", required_argument, 0, LONG_OPT_DEBUG_ROTATE_MAX}, {"disable-afs-check", no_argument, 0, 'A'}, {"disable-cache", no_argument, 0, LONG_OPT_DISABLE_BATCH_CACHE}, {"email", required_argument, 0, 'm'}, {"wait-for-files-upto", required_argument, 0, LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME}, {"gc", required_argument, 0, 'g'}, {"gc-size", required_argument, 0, LONG_OPT_GC_SIZE}, {"gc-count", required_argument, 0, 'G'}, {"help", no_argument, 0, 'h'}, {"makeflow-log", required_argument, 0, 'l'}, {"max-local", required_argument, 0, 'j'}, {"max-remote", required_argument, 0, 'J'}, {"monitor", required_argument, 0, LONG_OPT_MONITOR}, {"monitor-interval", required_argument, 0, LONG_OPT_MONITOR_INTERVAL}, {"monitor-log-name", required_argument, 0, LONG_OPT_MONITOR_LOG_NAME}, {"monitor-with-opened-files", no_argument, 0, LONG_OPT_MONITOR_OPENED_FILES}, {"monitor-with-time-series", no_argument, 0, LONG_OPT_MONITOR_TIME_SERIES}, {"mounts", required_argument, 0, LONG_OPT_MOUNTS}, {"password", required_argument, 0, LONG_OPT_PASSWORD}, {"port", required_argument, 0, 'p'}, {"port-file", required_argument, 0, 'Z'}, {"priority", required_argument, 0, 'P'}, {"project-name", required_argument, 0, 'N'}, {"retry", no_argument, 0, 'R'}, {"retry-count", required_argument, 0, 'r'}, {"shared-fs", required_argument, 0, LONG_OPT_SHARED_FS}, {"show-output", no_argument, 0, 'O'}, {"submission-timeout", required_argument, 0, 'S'}, {"summary-log", required_argument, 0, 'f'}, {"tickets", required_argument, 0, LONG_OPT_TICKETS}, {"version", no_argument, 0, 'v'}, {"log-verbose", no_argument, 0, LONG_OPT_LOG_VERBOSE_MODE}, {"working-dir", required_argument, 0, LONG_OPT_WORKING_DIR}, {"skip-file-check", no_argument, 0, LONG_OPT_SKIP_FILE_CHECK}, {"umbrella-binary", required_argument, 0, LONG_OPT_UMBRELLA_BINARY}, {"umbrella-log-prefix", required_argument, 0, LONG_OPT_UMBRELLA_LOG_PREFIX}, {"umbrella-mode", required_argument, 0, LONG_OPT_UMBRELLA_MODE}, {"umbrella-spec", required_argument, 0, LONG_OPT_UMBRELLA_SPEC}, {"work-queue-preferred-connection", required_argument, 0, LONG_OPT_PREFERRED_CONNECTION}, {"wq-estimate-capacity", no_argument, 0, 'E'}, {"wq-fast-abort", required_argument, 0, 'F'}, {"wq-keepalive-interval", required_argument, 0, 'u'}, {"wq-keepalive-timeout", required_argument, 0, 't'}, {"wq-schedule", required_argument, 0, 'W'}, {"wq-wait-queue-size", required_argument, 0, LONG_OPT_WQ_WAIT_FOR_WORKERS}, {"wrapper", required_argument, 0, LONG_OPT_WRAPPER}, {"wrapper-input", required_argument, 0, LONG_OPT_WRAPPER_INPUT}, {"wrapper-output", required_argument, 0, LONG_OPT_WRAPPER_OUTPUT}, {"zero-length-error", no_argument, 0, 'z'}, {"change-directory", required_argument, 0, 'X'}, {"docker", required_argument, 0, LONG_OPT_DOCKER}, {"docker-tar", required_argument, 0, LONG_OPT_DOCKER_TAR}, {"amazon-credentials", required_argument, 0, LONG_OPT_AMAZON_CREDENTIALS}, {"amazon-ami", required_argument, 0, LONG_OPT_AMAZON_AMI}, {"jx", no_argument, 0, LONG_OPT_JX}, {"jx-context", required_argument, 0, LONG_OPT_JX_CONTEXT}, {"enforcement", no_argument, 0, LONG_OPT_ENFORCEMENT}, {"parrot-path", required_argument, 0, LONG_OPT_PARROT_PATH}, {"singularity", required_argument, 0, LONG_OPT_SINGULARITY}, {"archive", optional_argument, 0, LONG_OPT_ARCHIVE}, {"archive-read", optional_argument, 0, LONG_OPT_ARCHIVE_READ_ONLY}, {"archive-write", optional_argument, 
0, LONG_OPT_ARCHIVE_WRITE_ONLY}, {"mesos-master", required_argument, 0, LONG_OPT_MESOS_MASTER}, {"mesos-path", required_argument, 0, LONG_OPT_MESOS_PATH}, {"mesos-preload", required_argument, 0, LONG_OPT_MESOS_PRELOAD}, {0, 0, 0, 0} }; static const char option_string_run[] = "aAB:c::C:d:Ef:F:g:G:hj:J:l:L:m:M:N:o:Op:P:r:RS:t:T:u:vW:X:zZ:"; while((c = getopt_long(argc, argv, option_string_run, long_options_run, NULL)) >= 0) { switch (c) { case 'a': work_queue_master_mode = "catalog"; break; case 'A': skip_afs_check = 1; break; case 'B': batch_submit_options = optarg; break; case 'c': clean_mode = MAKEFLOW_CLEAN_ALL; if(optarg){ if(strcasecmp(optarg, "intermediates") == 0){ clean_mode = MAKEFLOW_CLEAN_INTERMEDIATES; } else if(strcasecmp(optarg, "outputs") == 0){ clean_mode = MAKEFLOW_CLEAN_OUTPUTS; } else if(strcasecmp(optarg, "cache") == 0){ clean_mode = MAKEFLOW_CLEAN_CACHE; } else if(strcasecmp(optarg, "all") != 0){ fprintf(stderr, "makeflow: unknown clean option %s", optarg); exit(1); } } break; case 'C': setenv("CATALOG_HOST", optarg, 1); break; case 'd': debug_flags_set(optarg); break; case 'E': // This option is deprecated. Capacity estimation is now on by default. break; case LONG_OPT_AUTH: if (!auth_register_byname(optarg)) fatal("could not register authentication method `%s': %s", optarg, strerror(errno)); did_explicit_auth = 1; break; case LONG_OPT_TICKETS: chirp_tickets = strdup(optarg); break; case 'f': write_summary_to = xxstrdup(optarg); break; case 'F': wq_option_fast_abort_multiplier = atof(optarg); break; case 'g': if(strcasecmp(optarg, "none") == 0) { makeflow_gc_method = MAKEFLOW_GC_NONE; } else if(strcasecmp(optarg, "ref_count") == 0) { makeflow_gc_method = MAKEFLOW_GC_COUNT; if(makeflow_gc_count < 0) makeflow_gc_count = 16; /* Try to collect at most 16 files. */ } else if(strcasecmp(optarg, "on_demand") == 0) { makeflow_gc_method = MAKEFLOW_GC_ON_DEMAND; if(makeflow_gc_count < 0) makeflow_gc_count = 16; /* Try to collect at most 16 files. */ } else if(strcasecmp(optarg, "all") == 0) { makeflow_gc_method = MAKEFLOW_GC_ALL; if(makeflow_gc_count < 0) makeflow_gc_count = 1 << 14; /* Inode threshold of 2^14. 
*/ } else { fprintf(stderr, "makeflow: invalid garbage collection method: %s\n", optarg); exit(1); } break; case LONG_OPT_GC_SIZE: makeflow_gc_size = string_metric_parse(optarg); break; case 'G': makeflow_gc_count = atoi(optarg); break; case LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME: file_creation_patience_wait_time = MAX(0,atoi(optarg)); break; case 'h': show_help_run(argv[0]); return 0; case 'j': explicit_local_jobs_max = atoi(optarg); break; case 'J': explicit_remote_jobs_max = atoi(optarg); break; case 'l': logfilename = xxstrdup(optarg); break; case 'L': batchlogfilename = xxstrdup(optarg); break; case 'm': email_summary_to = xxstrdup(optarg); break; case LONG_OPT_MONITOR: if (!monitor) monitor = makeflow_monitor_create(); if(log_dir) free(log_dir); log_dir = xxstrdup(optarg); break; case LONG_OPT_MONITOR_INTERVAL: if (!monitor) monitor = makeflow_monitor_create(); monitor->interval = atoi(optarg); break; case LONG_OPT_MONITOR_TIME_SERIES: if (!monitor) monitor = makeflow_monitor_create(); monitor->enable_time_series = 1; break; case LONG_OPT_MONITOR_OPENED_FILES: if (!monitor) monitor = makeflow_monitor_create(); monitor->enable_list_files = 1; break; case LONG_OPT_MONITOR_LOG_NAME: if (!monitor) monitor = makeflow_monitor_create(); if(log_format) free(log_format); log_format = xxstrdup(optarg); break; case LONG_OPT_CACHE: mount_cache = xxstrdup(optarg); break; case LONG_OPT_MOUNTS: mountfile = xxstrdup(optarg); break; case LONG_OPT_AMAZON_CREDENTIALS: amazon_credentials = xxstrdup(optarg); break; case LONG_OPT_AMAZON_AMI: amazon_ami = xxstrdup(optarg); break; case 'M': case 'N': free(project); project = xxstrdup(optarg); work_queue_master_mode = "catalog"; catalog_reporting_on = 1; //set to true break; case 'o': debug_config_file(optarg); break; case 'p': port_set = 1; port = atoi(optarg); break; case 'P': priority = optarg; break; case 'r': makeflow_retry_flag = 1; makeflow_retry_max = atoi(optarg); break; case 'R': makeflow_retry_flag = 1; break; case 'S': makeflow_submit_timeout = atoi(optarg); break; case 't': work_queue_keepalive_timeout = optarg; break; case 'T': batch_queue_type = batch_queue_type_from_string(optarg); if(batch_queue_type == BATCH_QUEUE_TYPE_UNKNOWN) { fprintf(stderr, "makeflow: unknown batch queue type: %s\n", optarg); return 1; } break; case 'u': work_queue_keepalive_interval = optarg; break; case 'v': cctools_version_print(stdout, argv[0]); return 0; case 'W': if(!strcmp(optarg, "files")) { wq_option_scheduler = WORK_QUEUE_SCHEDULE_FILES; } else if(!strcmp(optarg, "time")) { wq_option_scheduler = WORK_QUEUE_SCHEDULE_TIME; } else if(!strcmp(optarg, "fcfs")) { wq_option_scheduler = WORK_QUEUE_SCHEDULE_FCFS; } else { fprintf(stderr, "makeflow: unknown scheduling mode %s\n", optarg); return 1; } break; case 'z': output_len_check = 1; break; case 'Z': work_queue_port_file = optarg; port = 0; port_set = 1; //WQ is going to set the port, so we continue as if already set. 
break; case LONG_OPT_PASSWORD: if(copy_file_to_buffer(optarg, &work_queue_password, NULL) < 0) { fprintf(stderr, "makeflow: couldn't open %s: %s\n", optarg, strerror(errno)); return 1; } break; case LONG_OPT_DISABLE_BATCH_CACHE: cache_mode = 0; break; case LONG_OPT_WQ_WAIT_FOR_WORKERS: wq_wait_queue_size = optarg; break; case LONG_OPT_WORKING_DIR: free(working_dir); working_dir = xxstrdup(optarg); break; case LONG_OPT_PREFERRED_CONNECTION: free(work_queue_preferred_connection); work_queue_preferred_connection = xxstrdup(optarg); break; case LONG_OPT_DEBUG_ROTATE_MAX: debug_config_file_size(string_metric_parse(optarg)); break; case LONG_OPT_LOG_VERBOSE_MODE: log_verbose_mode = 1; break; case LONG_OPT_WRAPPER: if(!wrapper) wrapper = makeflow_wrapper_create(); makeflow_wrapper_add_command(wrapper, optarg); break; case LONG_OPT_WRAPPER_INPUT: if(!wrapper) wrapper = makeflow_wrapper_create(); makeflow_wrapper_add_input_file(wrapper, optarg); break; case LONG_OPT_WRAPPER_OUTPUT: if(!wrapper) wrapper = makeflow_wrapper_create(); makeflow_wrapper_add_output_file(wrapper, optarg); break; case LONG_OPT_SHARED_FS: assert(shared_fs_list); if (optarg[0] != '/') fatal("Shared fs must be specified as an absolute path"); list_push_head(shared_fs_list, xxstrdup(optarg)); break; case LONG_OPT_DOCKER: if(!wrapper) wrapper = makeflow_wrapper_create(); container_mode = CONTAINER_MODE_DOCKER; container_image = xxstrdup(optarg); break; case LONG_OPT_SKIP_FILE_CHECK: skip_file_check = 1; break; case LONG_OPT_DOCKER_TAR: container_image_tar = xxstrdup(optarg); break; case LONG_OPT_SINGULARITY: if(!wrapper) wrapper = makeflow_wrapper_create(); container_mode = CONTAINER_MODE_SINGULARITY; container_image = xxstrdup(optarg); break; case LONG_OPT_ALLOCATION_MODE: if(!strcmp(optarg, "throughput")) { allocation_mode = CATEGORY_ALLOCATION_MODE_MAX_THROUGHPUT; } else if(!strcmp(optarg, "waste")) { allocation_mode = CATEGORY_ALLOCATION_MODE_MIN_WASTE; } else if(!strcmp(optarg, "fixed")) { allocation_mode = CATEGORY_ALLOCATION_MODE_FIXED; } else { fatal("Allocation mode '%s' is not valid. 
Use one of: throughput waste fixed", optarg); } break; case LONG_OPT_JX: jx_input = 1; break; case LONG_OPT_JX_CONTEXT: jx_context = xxstrdup(optarg); break; case LONG_OPT_UMBRELLA_BINARY: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_binary(umbrella, (const char *)xxstrdup(optarg)); break; case LONG_OPT_UMBRELLA_LOG_PREFIX: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_log_prefix(umbrella, (const char *)xxstrdup(optarg)); break; case LONG_OPT_UMBRELLA_MODE: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_mode(umbrella, (const char *)xxstrdup(optarg)); break; case LONG_OPT_UMBRELLA_SPEC: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_spec(umbrella, (const char *)xxstrdup(optarg)); break; case LONG_OPT_MESOS_MASTER: mesos_master = xxstrdup(optarg); break; case LONG_OPT_MESOS_PATH: mesos_path = xxstrdup(optarg); break; case LONG_OPT_MESOS_PRELOAD: mesos_preload = xxstrdup(optarg); break; case LONG_OPT_ARCHIVE: should_read_archive = 1; should_write_to_archive = 1; set_archive_directory_string(&archive_directory, optarg); break; case LONG_OPT_ARCHIVE_READ_ONLY: should_read_archive = 1; set_archive_directory_string(&archive_directory, optarg); break; case LONG_OPT_ARCHIVE_WRITE_ONLY: should_write_to_archive = 1; set_archive_directory_string(&archive_directory, optarg); break; default: show_help_run(argv[0]); return 1; case 'X': change_dir = optarg; break; case LONG_OPT_ENFORCEMENT: if(!enforcer) enforcer = makeflow_wrapper_create(); break; case LONG_OPT_PARROT_PATH: parrot_path = xxstrdup(optarg); break; } } cctools_version_debug(D_MAKEFLOW_RUN, argv[0]); if(!did_explicit_auth) auth_register_all(); if(chirp_tickets) { auth_ticket_load(chirp_tickets); free(chirp_tickets); } else { auth_ticket_load(NULL); } if (enforcer && umbrella) { fatal("enforcement and Umbrella are mutually exclusive\n"); } if((argc - optind) != 1) { int rv = access("./Makeflow", R_OK); if(rv < 0) { fprintf(stderr, "makeflow: No makeflow specified and file \"./Makeflow\" could not be found.\n"); fprintf(stderr, "makeflow: Run \"%s -h\" for help with options.\n", argv[0]); return 1; } dagfile = "./Makeflow"; } else { dagfile = argv[optind]; } if(batch_queue_type == BATCH_QUEUE_TYPE_WORK_QUEUE) { if(strcmp(work_queue_master_mode, "catalog") == 0 && project == NULL) { fprintf(stderr, "makeflow: Makeflow running in catalog mode. Please use '-N' option to specify the name of this project.\n"); fprintf(stderr, "makeflow: Run \"makeflow -h\" for help with options.\n"); return 1; } // Use Work Queue default port in standalone mode when port is not // specified with -p option. In Work Queue catalog mode, Work Queue // would choose an arbitrary port when port is not explicitly specified. 
if(!port_set && strcmp(work_queue_master_mode, "standalone") == 0) { port_set = 1; port = WORK_QUEUE_DEFAULT_PORT; } if(port_set) { char *value; value = string_format("%d", port); setenv("WORK_QUEUE_PORT", value, 1); free(value); } } if(!logfilename) logfilename = string_format("%s.makeflowlog", dagfile); printf("parsing %s...\n",dagfile); struct dag *d; if (jx_input) { struct jx *t = NULL; if (jx_context) { printf("using JX context %s\n", jx_context); t = jx_parse_file(jx_context); if (!t) fatal("couldn't parse context file %s\n",jx_context); } struct jx *ctx = jx_eval(t, NULL); jx_delete(t); t = jx_parse_file(dagfile); struct jx *dag = jx_eval(t, ctx); jx_delete(t); jx_delete(ctx); d = dag_from_jx(dag); jx_delete(dag); } else { d = dag_from_file(dagfile); } if(!d) { fatal("makeflow: couldn't load %s: %s\n", dagfile, strerror(errno)); } d->allocation_mode = allocation_mode; // Makeflows running LOCAL batch type have only one queue that behaves as if remote // This forces -J vs -j to behave correctly if(batch_queue_type == BATCH_QUEUE_TYPE_LOCAL) { explicit_remote_jobs_max = explicit_local_jobs_max; } if(explicit_local_jobs_max) { local_jobs_max = explicit_local_jobs_max; } else { local_jobs_max = load_average_get_cpus(); } if(explicit_remote_jobs_max) { remote_jobs_max = explicit_remote_jobs_max; } else { if(batch_queue_type == BATCH_QUEUE_TYPE_LOCAL) { remote_jobs_max = load_average_get_cpus(); } else if(batch_queue_type == BATCH_QUEUE_TYPE_WORK_QUEUE) { remote_jobs_max = 10 * MAX_REMOTE_JOBS_DEFAULT; } else { remote_jobs_max = MAX_REMOTE_JOBS_DEFAULT; } } s = getenv("MAKEFLOW_MAX_REMOTE_JOBS"); if(s) { remote_jobs_max = MIN(remote_jobs_max, atoi(s)); } s = getenv("MAKEFLOW_MAX_LOCAL_JOBS"); if(s) { int n = atoi(s); local_jobs_max = MIN(local_jobs_max, n); if(batch_queue_type == BATCH_QUEUE_TYPE_LOCAL) { remote_jobs_max = MIN(local_jobs_max, n); } } remote_queue = batch_queue_create(batch_queue_type); if(!remote_queue) { fprintf(stderr, "makeflow: couldn't create batch queue.\n"); if(port != 0) fprintf(stderr, "makeflow: perhaps port %d is already in use?\n", port); exit(EXIT_FAILURE); } if(!batchlogfilename) { if(batch_queue_supports_feature(remote_queue, "batch_log_name")){ batchlogfilename = string_format(batch_queue_supports_feature(remote_queue, "batch_log_name"), dagfile); } else { batchlogfilename = string_format("%s.batchlog", dagfile); } } if(batch_queue_type == BATCH_QUEUE_TYPE_MESOS) { batch_queue_set_option(remote_queue, "mesos-path", mesos_path); batch_queue_set_option(remote_queue, "mesos-master", mesos_master); batch_queue_set_option(remote_queue, "mesos-preload", mesos_preload); } if(batch_queue_type == BATCH_QUEUE_TYPE_DRYRUN) { FILE *file = fopen(batchlogfilename,"w"); if(!file) fatal("unable to open log file %s: %s\n", batchlogfilename, strerror(errno)); fprintf(file, "#!/bin/sh\n"); fprintf(file, "set -x\n"); fprintf(file, "set -e\n"); fprintf(file, "\n# %s version %s (released %s)\n\n", argv[0], CCTOOLS_VERSION, CCTOOLS_RELEASE_DATE); fclose(file); } batch_queue_set_logfile(remote_queue, batchlogfilename); batch_queue_set_option(remote_queue, "batch-options", batch_submit_options); batch_queue_set_option(remote_queue, "skip-afs-check", skip_afs_check ? 
"yes" : "no"); batch_queue_set_option(remote_queue, "password", work_queue_password); batch_queue_set_option(remote_queue, "master-mode", work_queue_master_mode); batch_queue_set_option(remote_queue, "name", project); batch_queue_set_option(remote_queue, "priority", priority); batch_queue_set_option(remote_queue, "keepalive-interval", work_queue_keepalive_interval); batch_queue_set_option(remote_queue, "keepalive-timeout", work_queue_keepalive_timeout); batch_queue_set_option(remote_queue, "caching", cache_mode ? "yes" : "no"); batch_queue_set_option(remote_queue, "wait-queue-size", wq_wait_queue_size); batch_queue_set_option(remote_queue, "amazon-credentials", amazon_credentials); batch_queue_set_option(remote_queue, "amazon-ami", amazon_ami); batch_queue_set_option(remote_queue, "working-dir", working_dir); batch_queue_set_option(remote_queue, "master-preferred-connection", work_queue_preferred_connection); char *fa_multiplier = string_format("%f", wq_option_fast_abort_multiplier); batch_queue_set_option(remote_queue, "fast-abort", fa_multiplier); free(fa_multiplier); /* Do not create a local queue for systems where local and remote are the same. */ if(!batch_queue_supports_feature(remote_queue, "local_job_queue")) { local_queue = 0; } else { local_queue = batch_queue_create(BATCH_QUEUE_TYPE_LOCAL); if(!local_queue) { fatal("couldn't create local job queue."); } } /* Remote storage modes do not (yet) support measuring storage for garbage collection. */ if(makeflow_gc_method == MAKEFLOW_GC_SIZE && !batch_queue_supports_feature(remote_queue, "gc_size")) { makeflow_gc_method = MAKEFLOW_GC_ALL; } /* Set dag_node->umbrella_spec */ if(!clean_mode) { struct dag_node *cur; cur = d->nodes; while(cur) { struct dag_variable_lookup_set s = {d, cur->category, cur, NULL}; char *spec = NULL; spec = dag_variable_lookup_string("SPEC", &s); if(spec) { debug(D_MAKEFLOW_RUN, "setting dag_node->umbrella_spec (rule %d) from the makefile ...\n", cur->nodeid); dag_node_set_umbrella_spec(cur, xxstrdup(spec)); } else if(umbrella && umbrella->spec) { debug(D_MAKEFLOW_RUN, "setting dag_node->umbrella_spec (rule %d) from the --umbrella-spec option ...\n", cur->nodeid); dag_node_set_umbrella_spec(cur, umbrella->spec); } free(spec); cur = cur->next; } debug(D_MAKEFLOW_RUN, "makeflow_wrapper_umbrella_preparation...\n"); // When the user specifies umbrella specs in a makefile, but does not use any `--umbrella...` option, // an umbrella wrapper is created to hold the default values for umbrella-related settings such as // log_prefix and the default umbrella execution engine. if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_preparation(umbrella, d); } if(enforcer) { makeflow_wrapper_enforcer_init(enforcer, parrot_path); } makeflow_parse_input_outputs(d); makeflow_prepare_nested_jobs(d); if (change_dir) chdir(change_dir); /* Prepare the input files specified in the mountfile. 
*/ if(mountfile && !clean_mode) { /* check the validity of the mountfile and load the info from the mountfile into the dag */ printf("checking the consistency of the mountfile ...\n"); if(makeflow_mounts_parse_mountfile(mountfile, d)) { fprintf(stderr, "Failed to parse the mountfile: %s.\n", mountfile); free(mountfile); return -1; } free(mountfile); use_mountfile = 1; } printf("checking %s for consistency...\n",dagfile); if(!makeflow_check(d)) { exit(EXIT_FAILURE); } if(!makeflow_check_batch_consistency(d) && clean_mode == MAKEFLOW_CLEAN_NONE) { exit(EXIT_FAILURE); } printf("%s has %d rules.\n",dagfile,d->nodeid_counter); setlinebuf(stdout); setlinebuf(stderr); if(mount_cache) d->cache_dir = mount_cache; /* If the user uses the --cache option to specify the mount cache dir and the log file also has * a cache dir logged, the two dirs must be the same; otherwise exit. */ if(makeflow_log_recover(d, logfilename, log_verbose_mode, remote_queue, clean_mode, skip_file_check )) { dag_mount_clean(d); exit(EXIT_FAILURE); } /* This check must happen after makeflow_log_recover, which may load the cache_dir info into d->cache_dir. * This check must happen before makeflow_mounts_install to guarantee that the program ends before any mount is copied if any target is invalid. */ if(use_mountfile) { if(makeflow_mount_check_target(d)) { dag_mount_clean(d); exit(EXIT_FAILURE); } } if(use_mountfile && !clean_mode) { if(makeflow_mounts_install(d)) { fprintf(stderr, "Failed to install the dependencies specified in the mountfile!\n"); dag_mount_clean(d); exit(EXIT_FAILURE); } } if(monitor) { if(!log_dir) fatal("Monitor mode was enabled, but a log output directory was not specified (use --monitor=<dir>)"); if(!log_format) log_format = xxstrdup(DEFAULT_MONITOR_LOG_FORMAT); if(monitor->interval < 1) fatal("Monitoring interval should be positive."); makeflow_prepare_for_monitoring(d, monitor, remote_queue, log_dir, log_format); free(log_dir); free(log_format); } struct dag_file *f = dag_file_lookup_or_create(d, batchlogfilename); makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXPECT); if(batch_queue_supports_feature(remote_queue, "batch_log_transactions")) { const char *transactions = batch_queue_get_option(remote_queue, "batch_log_transactions_name"); f = dag_file_lookup_or_create(d, transactions); makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXPECT); } if(clean_mode != MAKEFLOW_CLEAN_NONE) { printf("cleaning filesystem...\n"); if(makeflow_clean(d, remote_queue, clean_mode)) { fprintf(stderr, "Failed to clean up makeflow!\n"); exit(EXIT_FAILURE); } if(clean_mode == MAKEFLOW_CLEAN_ALL) { unlink(logfilename); } exit(0); } /* this call guarantees the mount fields set up from the info of the makeflow log file are cleaned up * even if the user does not use the --mounts or -c option. 
*/ dag_mount_clean(d); printf("starting workflow....\n"); port = batch_queue_port(remote_queue); if(work_queue_port_file) opts_write_port_file(work_queue_port_file, port); if(port > 0) printf("listening for workers on port %d.\n", port); signal(SIGINT, handle_abort); signal(SIGQUIT, handle_abort); signal(SIGTERM, handle_abort); makeflow_log_started_event(d); runtime = timestamp_get(); if (container_mode == CONTAINER_MODE_DOCKER) { makeflow_wrapper_docker_init(wrapper, container_image, container_image_tar); }else if(container_mode == CONTAINER_MODE_SINGULARITY){ makeflow_wrapper_singularity_init(wrapper, container_image); } d->archive_directory = archive_directory; d->should_read_archive = should_read_archive; d->should_write_to_archive = should_write_to_archive; makeflow_run(d); time_completed = timestamp_get(); runtime = time_completed - runtime; if(local_queue) batch_queue_delete(local_queue); /* * Set the abort and failed flag for batch_job_mesos mode. * Since batch_queue_delete(struct batch_queue *q) will call * batch_queue_mesos_free(struct batch_queue *q), which is defined * in batch_job/src/batch_job_mesos.c. Then this function will check * the abort and failed status of the batch_queue and inform * the makeflow mesos scheduler. */ if (batch_queue_type == BATCH_QUEUE_TYPE_MESOS) { batch_queue_set_int_option(remote_queue, "batch-queue-abort-flag", (int)makeflow_abort_flag); batch_queue_set_int_option(remote_queue, "batch-queue-failed-flag", (int)makeflow_failed_flag); } batch_queue_delete(remote_queue); if(write_summary_to || email_summary_to) makeflow_summary_create(d, write_summary_to, email_summary_to, runtime, time_completed, argc, argv, dagfile, remote_queue, makeflow_abort_flag, makeflow_failed_flag ); /* XXX better to write created files to log, then delete those listed in log. */ if (container_mode == CONTAINER_MODE_DOCKER) { unlink(CONTAINER_DOCKER_SH); }else if(container_mode == CONTAINER_MODE_SINGULARITY){ unlink(CONTAINER_SINGULARITY_SH); } if(makeflow_abort_flag) { makeflow_log_aborted_event(d); fprintf(stderr, "workflow was aborted.\n"); exit(EXIT_FAILURE); } else if(makeflow_failed_flag) { makeflow_log_failed_event(d); fprintf(stderr, "workflow failed.\n"); exit(EXIT_FAILURE); } else { makeflow_log_completed_event(d); printf("nothing left to do.\n"); exit(EXIT_SUCCESS); } free(archive_directory); return 0; } /* vim: set noexpandtab tabstop=4: */
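A note on the option-parsing pattern used throughout the file above: long-only options are given values above UCHAR_MAX so they can never collide with a single-character short option, and every case must end in a break (the missing breaks fixed above in the allocation-mode and umbrella-spec cases are exactly this class of bug). A minimal, self-contained sketch of the getopt_long idiom under those assumptions — the option names here are hypothetical, not makeflow's:

#include <getopt.h>
#include <limits.h>
#include <stdio.h>

/* Long-only options get values above UCHAR_MAX so they can never
   collide with a single-character short option. */
enum {
	LONG_OPT_EXAMPLE_ALPHA = UCHAR_MAX + 1, /* hypothetical */
	LONG_OPT_EXAMPLE_BETA
};

static const struct option long_options[] = {
	{"verbose", no_argument, 0, 'v'},                        /* also has a short form */
	{"alpha", required_argument, 0, LONG_OPT_EXAMPLE_ALPHA}, /* long-only */
	{"beta", no_argument, 0, LONG_OPT_EXAMPLE_BETA},         /* long-only */
	{0, 0, 0, 0}
};

int main(int argc, char *argv[])
{
	int c;
	while((c = getopt_long(argc, argv, "v", long_options, NULL)) >= 0) {
		switch (c) {
		case 'v':
			printf("verbose\n");
			break;
		case LONG_OPT_EXAMPLE_ALPHA:
			printf("alpha = %s\n", optarg);
			break; /* every case ends in break, avoiding accidental fall-through */
		case LONG_OPT_EXAMPLE_BETA:
			printf("beta\n");
			break;
		default:
			fprintf(stderr, "usage: %s [-v] [--alpha <x>] [--beta]\n", argv[0]);
			return 1;
		}
	}
	return 0;
}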
1
13,790
If these variables are only used in the main function, move them to the beginning of that function. If there is a foreseeable reason to keep them as global statics, just let me know.
cooperative-computing-lab-cctools
c
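The review comment on this row asks for file-scope variables that only main() uses to be moved into main() itself. A minimal before/after sketch of that refactor — the variable names are hypothetical, not the actual makeflow globals:

#include <stdio.h>

/* Before: file-scope statics, visible to every function in the file
 * even though only main() uses them:
 *
 *     static int jobs_max = 4;          (hypothetical)
 *     static const char *log_name = 0;  (hypothetical)
 */

int main(int argc, char *argv[])
{
	/* After: the same variables, scoped to main(). They can no longer
	 * be touched by other functions by accident, and the reader can
	 * see their whole lifetime at a glance. */
	int jobs_max = 4;            /* hypothetical */
	const char *log_name = NULL; /* hypothetical */

	if(argc > 1)
		log_name = argv[1];

	printf("jobs_max=%d log=%s\n", jobs_max, log_name ? log_name : "(none)");
	return 0;
}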
@@ -40,12 +40,12 @@ namespace Microsoft.Sarif.Viewer { if (serviceProvider == null) { - throw new ArgumentNullException("serviceProvider"); + throw new ArgumentNullException(nameof(serviceProvider)); } if (region == null) { - throw new ArgumentNullException("location"); + throw new ArgumentNullException(nameof(region)); } m_serviceProvider = serviceProvider;
1
using System; using System.Diagnostics; using Microsoft.CodeAnalysis.Sarif; using Microsoft.VisualStudio.ComponentModelHost; using Microsoft.VisualStudio.Shell.Interop; using Microsoft.VisualStudio.Text; using Microsoft.VisualStudio.Text.Adornments; using Microsoft.VisualStudio.Text.Editor; using Microsoft.VisualStudio.Text.Tagging; using Microsoft.VisualStudio.TextManager.Interop; namespace Microsoft.Sarif.Viewer { /// <summary> /// This class represents an instance of a "highlighted" line in the editor, and holds the Shell objects and logic /// necessary to manage its lifecycle and appearance. /// </summary> public class ResultTextMarker { public const string DEFAULT_SELECTION_COLOR = "CodeAnalysisWarningSelection"; public const string KEYEVENT_SELECTION_COLOR = "CodeAnalysisKeyEventSelection"; public const string LINE_TRACE_SELECTION_COLOR = "CodeAnalysisLineTraceSelection"; public const string HOVER_SELECTION_COLOR = "CodeAnalysisCurrentStatementSelection"; private Region m_region; private IServiceProvider m_serviceProvider; private TrackingTagSpan<TextMarkerTag> m_marker; private SimpleTagger<TextMarkerTag> m_tagger; private ITrackingSpan m_trackingSpan; private IWpfTextView m_textView; private long? m_docCookie; public string FullFilePath { get; set; } public string Color { get; set; } /// <summary> /// fullFilePath may be null for global issues. /// </summary> public ResultTextMarker(IServiceProvider serviceProvider, Region region, string fullFilePath) { if (serviceProvider == null) { throw new ArgumentNullException("serviceProvider"); } if (region == null) { throw new ArgumentNullException("location"); } m_serviceProvider = serviceProvider; m_region = region; FullFilePath = fullFilePath; Color = DEFAULT_SELECTION_COLOR; } internal IVsWindowFrame NavigateTo(bool highlightLine, string highlightColor, bool usePreviewPane) { // Fall back to the file and line number IVsWindowFrame windowFrame = SdkUiUtilities.OpenDocument(SarifViewerPackage.ServiceProvider, this.FullFilePath, usePreviewPane); if (windowFrame != null) { IVsTextView textView = GetTextViewFromFrame(windowFrame); if (textView == null) { return null; } var sourceLocation = this.GetSourceLocation(); // Navigate the caret to the desired location. Text span uses 0-based indexes TextSpan ts; ts.iEndLine = ts.iStartLine = sourceLocation.StartLine - 1; ts.iEndIndex = ts.iStartIndex = Math.Max(sourceLocation.StartColumn - 1, 0); textView.EnsureSpanVisible(ts); textView.SetSelection(ts.iStartLine, ts.iStartIndex, ts.iEndLine, ts.iEndIndex); if (highlightLine) { this.AddSelectionMarker(highlightColor); } } return windowFrame; } /// <summary> /// Get the source location of the current marker (the tracked code location). /// </summary> /// <returns> /// This is a clone of the stored source location with the actual source code coordinates. /// </returns> public Region GetSourceLocation() { Region sourceLocation = new Region() { Offset = m_region.Offset, StartColumn = m_region.StartColumn, EndColumn = m_region.EndColumn, StartLine = m_region.StartLine, EndLine = m_region.EndLine }; SaveCurrentTrackingData(sourceLocation); return sourceLocation; } /// <summary> /// Save the current tracking data to the stored source location. /// If the user changes, saves, and closes a document that has tracking data, and later reopens it, /// this class will not lose the place where the code exists. 
/// </summary> public void SaveCurrentTrackingData() { SaveCurrentTrackingData(m_region); } /// <summary> /// Clear all markers and tracking classes /// </summary> public void Clear() { if (m_marker != null) { RemoveMarker(); } if (IsTracking()) { RemoveTracking(); } m_tagger = null; } /// <summary> /// Select the current tracking text with <paramref name="highlightColor"/>. /// If highlightColor is null then the code will be selected with the color from <seealso cref="Color"/>. /// If the mark doesn't support tracking changes, then we simply ignore this condition (addresses VS crash /// reported in Bug 476347 : Code Analysis clicking error report C6244 causes VS2012 to crash). /// Tracking changes helps to ensure that we navigate to the right line even if edits to the file /// have occurred, but even if that behavior doesn't work right, it is better to /// simply return here (before the fix this code threw an exception which terminated VS). /// </summary> /// <param name="highlightColor">Color</param> public void AddSelectionMarker(string highlightColor) { if (!IsTracking()) { return; } m_marker = m_tagger.CreateTagSpan(m_trackingSpan, new TextMarkerTag(highlightColor ?? Color)); } /// <summary> /// Add tracking for text in <paramref name="span"/> for document with id <paramref name="docCookie"/>. /// </summary> public void AddTracking(IWpfTextView wpfTextView, ITextSnapshot textSnapshot, long docCookie, Span span) { Debug.Assert(docCookie >= 0); Debug.Assert(!IsTracking(), "This marker is already tracking changes."); m_docCookie = docCookie; CreateTracking(wpfTextView, textSnapshot, span); } /// <summary> /// Remove selection for tracking text /// </summary> public void RemoveMarker() { if (m_tagger != null && m_marker != null) { m_tagger.RemoveTagSpan(m_marker); } m_marker = null; } /// <summary> /// Check whether this class tracks changes for document <paramref name="docCookie"/> /// </summary> public bool IsTracking(long docCookie) { return m_docCookie.HasValue && m_docCookie.Value == docCookie && m_trackingSpan != null; } public void DetachFromDocument(long docCookie) { if (this.IsTracking(docCookie)) { this.Clear(); } } /// <summary> /// An overridden method for reacting to the event of a document window /// being opened /// </summary> public void AttachToDocument(string documentName, long docCookie, IVsWindowFrame frame) { // For these cases, this event has nothing to do with this item if (frame == null || this.IsTracking(docCookie) || string.Compare(documentName, this.FullFilePath, StringComparison.OrdinalIgnoreCase) != 0) { return; } AttachToDocumentWorker(frame, docCookie); } private IVsTextView GetTextViewFromFrame(IVsWindowFrame frame) { // Get the document view from the window frame, then get the text view object docView; int hr = frame.GetProperty((int)__VSFPROPID.VSFPROPID_DocView, out docView); if ((hr != 0 && hr != 1) || docView == null) { return null; } IVsCodeWindow codeWindow = docView as IVsCodeWindow; IVsTextView textView; codeWindow.GetLastActiveView(out textView); if (textView == null) { codeWindow.GetPrimaryView(out textView); } return textView; } /// <summary> /// Check that the current <paramref name="marker"/> points to the correct line position /// and attach it to <paramref name="docCookie"/> to track changes. /// </summary> private void AttachToDocumentWorker(IVsWindowFrame frame, long docCookie) { var sourceLocation = this.GetSourceLocation(); int line = sourceLocation.StartLine; // Coerce the line numbers so we don't go out of bounds. 
However, if we have to // coerce the line numbers, then we won't perform highlighting because most likely // we will highlight the wrong line. The idea here is to just go to the top or bottom // of the file as a "best effort" to get as close as possible to where it should be if (line <= 0) { line = 1; } IVsTextView textView = GetTextViewFromFrame(frame); if (textView != null) { // Locate the specific line/column position in the text view and go there IVsTextLines textLines; textView.GetBuffer(out textLines); if (textLines != null) { int lastLine; int length; int hr = textLines.GetLastLineIndex(out lastLine, out length); if (hr != 0) { return; } // our source code lines are 1-based, and the VS API source code lines are 0-based lastLine = lastLine + 1; // Same thing here, coerce the line number if it's going out of bounds if (line > lastLine) { line = lastLine; } } // Call a bunch of functions to get the WPF text view so we can perform the highlighting only // if we haven't yet IWpfTextView wpfTextView = GetWpfTextView(textView); if (wpfTextView != null) { AttachMarkerToTextView(wpfTextView, docCookie, this, line, sourceLocation.StartColumn, line + (sourceLocation.EndLine - sourceLocation.StartLine), sourceLocation.EndColumn); } } } /// <summary> /// Helper method for getting an IWpfTextView from an IVsTextView object /// </summary> /// <param name="textView"></param> /// <returns></returns> private IWpfTextView GetWpfTextView(IVsTextView textView) { IWpfTextViewHost textViewHost = null; IVsUserData userData = textView as IVsUserData; if (userData != null) { Guid guid = Microsoft.VisualStudio.Editor.DefGuidList.guidIWpfTextViewHost; object wpfTextViewHost = null; userData.GetData(ref guid, out wpfTextViewHost); textViewHost = wpfTextViewHost as IWpfTextViewHost; } if (textViewHost == null) { return null; } return textViewHost.TextView; } /// <summary> /// Highlight the source code on a particular line /// </summary> private static void AttachMarkerToTextView(IWpfTextView textView, long docCookie, ResultTextMarker marker, int line, int column, int endLine, int endColumn) { // If for some reason the start line is not correct, just skip the highlighting ITextSnapshot textSnapshot = textView.TextSnapshot; if (line > textSnapshot.LineCount) { return; } Span spanToColor; int markerStart, markerEnd = 0; try { // Fix up the end line number if it's inconsistent if (endLine <= 0 || endLine < line) { endLine = line; } bool coerced = false; // Calculate the start and end marker bound. Adjust for the column values if // the values don't make sense. Make sure we handle the case of an empty file correctly ITextSnapshotLine startTextLine = textSnapshot.GetLineFromLineNumber(Math.Max(line - 1, 0)); ITextSnapshotLine endTextLine = textSnapshot.GetLineFromLineNumber(Math.Max(endLine - 1, 0)); if (column <= 0 || column >= startTextLine.Length) { column = 1; coerced = true; } // Calculate the end marker bound. 
Perform coercion on the values if they aren't consistent if (endColumn <= 0 || endColumn >= endTextLine.Length) { endColumn = endTextLine.Length; coerced = true; } // If we are highlighting just one line and the column values don't make // sense or we corrected one or more of them, then simply mark the // entire line if (endLine == line && (coerced || column >= endColumn)) { column = 1; endColumn = endTextLine.Length; } // Create a span with the calculated markers markerStart = startTextLine.Start.Position + column - 1; markerEnd = endTextLine.Start.Position + endColumn; spanToColor = Span.FromBounds(markerStart, markerEnd); marker.AddTracking(textView, textSnapshot, docCookie, spanToColor); } catch (Exception e) { // Log the exception and move ahead. We don't want to bubble this or fail. // We just don't color the problem line. Debug.Print(e.Message); } } private void RemoveTracking() { if (m_trackingSpan != null) { // TODO: Find a way to delete TrackingSpan m_marker = m_tagger.CreateTagSpan(m_trackingSpan, new TextMarkerTag(Color)); RemoveMarker(); m_trackingSpan = null; m_tagger = null; m_docCookie = null; } } private void CreateTracking(IWpfTextView textView, ITextSnapshot textSnapshot, Span span) { if (m_trackingSpan != null) return; m_textView = textView; if (m_tagger == null) { IComponentModel componentModel = (IComponentModel)m_serviceProvider.GetService(typeof(SComponentModel)); ITextMarkerProviderFactory textMarkerProviderFactory = componentModel.GetService<ITextMarkerProviderFactory>(); // Get a SimpleTagger over the buffer to color m_tagger = textMarkerProviderFactory.GetTextMarkerTagger(m_textView.TextBuffer); } // Add the marker if (m_tagger != null) { // The list of colors for TextMarkerTag is defined in Platform\Text\Impl\TextMarkerAdornment\TextMarkerProviderFactory.cs m_trackingSpan = textSnapshot.CreateTrackingSpan(span, SpanTrackingMode.EdgeExclusive); } } private bool IsValidMarker() { return (m_marker != null && m_marker.Span != null && m_marker.Span.TextBuffer != null && m_marker.Span.TextBuffer.CurrentSnapshot != null); } private void SaveCurrentTrackingData(Region sourceLocation) { try { if (!IsTracking()) { return; } ITextSnapshot textSnapshot = m_trackingSpan.TextBuffer.CurrentSnapshot; SnapshotPoint startPoint = m_trackingSpan.GetStartPoint(textSnapshot); SnapshotPoint endPoint = m_trackingSpan.GetEndPoint(textSnapshot); var startLine = startPoint.GetContainingLine(); var endLine = endPoint.GetContainingLine(); var textLineStart = m_textView.GetTextViewLineContainingBufferPosition(startPoint); var textLineEnd = m_textView.GetTextViewLineContainingBufferPosition(endPoint); sourceLocation.StartColumn = startLine.Start.Position - textLineStart.Start.Position; sourceLocation.EndColumn = endLine.End.Position - textLineEnd.Start.Position; sourceLocation.StartLine = startLine.LineNumber + 1; sourceLocation.EndLine = endLine.LineNumber + 1; } catch (InvalidOperationException) { // Editor throws InvalidOperationException in some cases - // We act like tracking isn't turned on if this is thrown to avoid // taking all of VS down. } } private bool IsTracking() { return m_docCookie.HasValue && IsTracking(m_docCookie.Value); } } }
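Much of AttachMarkerToTextView above is defensive clamping of 1-based line/column values into the snapshot's real bounds. A small sketch of that same clamping idea, written in C to match the other examples in this document — the function name and shape are hypothetical, not part of the SARIF viewer:

#include <stdio.h>

/* Clamp a 1-based line/column pair into a buffer's actual bounds and
 * report whether anything had to be coerced (hypothetical shape,
 * mirroring the coercion performed in AttachMarkerToTextView above). */
static int clamp_position(int *line, int *column, int last_line, int line_length)
{
	int coerced = 0;
	if(*line < 1) { *line = 1; coerced = 1; }
	if(*line > last_line) { *line = last_line; coerced = 1; }
	if(*column < 1 || *column >= line_length) { *column = 1; coerced = 1; }
	return coerced;
}

int main(void)
{
	int line = 0, column = 99;
	int coerced = clamp_position(&line, &column, 10, 40);
	printf("line=%d column=%d coerced=%d\n", line, column, coerced);
	return 0;
}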
1
10,940
This one was wrong: the parameter is `region`, but the exception named "location".
microsoft-sarif-sdk
.cs
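The patch in this row replaces hard-coded parameter-name strings with C#'s nameof, so the exception text can never drift out of sync with the parameter — which is exactly the mismatch the reviewer flagged. C has no nameof, but the preprocessor's stringizing operator gives a similar keep-in-sync guarantee; a minimal sketch, with a hypothetical REQUIRE_NON_NULL helper:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: abort with a message that embeds the actual
 * parameter name via the preprocessor's stringizing operator (#p).
 * Renaming the parameter while forgetting the message is no longer
 * possible, which is the guarantee nameof() gives in C#. */
#define REQUIRE_NON_NULL(p) \
	do { \
		if(!(p)) { \
			fprintf(stderr, "%s must not be NULL\n", #p); \
			exit(EXIT_FAILURE); \
		} \
	} while(0)

struct region { int start_line; };

static void navigate(struct region *region, const char *full_path)
{
	REQUIRE_NON_NULL(region);    /* message automatically says "region" */
	REQUIRE_NON_NULL(full_path); /* message automatically says "full_path" */
	printf("navigating to %s:%d\n", full_path, region->start_line);
}

int main(void)
{
	struct region r = { 42 };
	navigate(&r, "example.c");
	return 0;
}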
@@ -1697,9 +1697,14 @@ type Observer interface { // updated locally, but not yet saved at the server. LocalChange(ctx context.Context, node Node, write WriteRange) // BatchChanges announces that the nodes have all been updated - // together atomically. Each NodeChange in changes affects the - // same top-level folder and branch. - BatchChanges(ctx context.Context, changes []NodeChange) + // together atomically. Each NodeChange in `changes` affects the + // same top-level folder and branch. `allAffectedNodeIDs` is a + // list of all the nodes that had their underlying data changed, + // even if it wasn't an user-visible change (e.g., if a + // subdirectory was updated, the directory block for the TLF root + // is updated but that wouldn't be visible to a user). + BatchChanges(ctx context.Context, changes []NodeChange, + allAffectedNodeIDs []NodeID) // TlfHandleChange announces that the handle of the corresponding // folder branch has changed, likely due to previously-unresolved // assertions becoming resolved. This indicates that the listener
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "time" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/kbfsmd" "github.com/keybase/kbfs/tlf" metrics "github.com/rcrowley/go-metrics" "golang.org/x/net/context" ) type dataVersioner interface { // DataVersion returns the data version for this block DataVersion() DataVer } type logMaker interface { MakeLogger(module string) logger.Logger } type blockCacher interface { BlockCache() BlockCache } type keyGetterGetter interface { keyGetter() blockKeyGetter } type codecGetter interface { Codec() kbfscodec.Codec } type blockServerGetter interface { BlockServer() BlockServer } type cryptoPureGetter interface { cryptoPure() cryptoPure } type cryptoGetter interface { Crypto() Crypto } type currentSessionGetterGetter interface { CurrentSessionGetter() CurrentSessionGetter } type signerGetter interface { Signer() kbfscrypto.Signer } type diskBlockCacheGetter interface { DiskBlockCache() DiskBlockCache } type diskBlockCacheSetter interface { MakeDiskBlockCacheIfNotExists() error } type clockGetter interface { Clock() Clock } type diskLimiterGetter interface { DiskLimiter() DiskLimiter } type syncedTlfGetterSetter interface { IsSyncedTlf(tlfID tlf.ID) bool SetTlfSyncState(tlfID tlf.ID, isSynced bool) error } type blockRetrieverGetter interface { BlockRetriever() BlockRetriever } // Block just needs to be (de)serialized using msgpack type Block interface { dataVersioner // GetEncodedSize returns the encoded size of this block, but only // if it has been previously set; otherwise it returns 0. GetEncodedSize() uint32 // SetEncodedSize sets the encoded size of this block, locally // caching it. The encoded size is not serialized. SetEncodedSize(size uint32) // NewEmpty returns a new block of the same type as this block NewEmpty() Block // Set sets this block to the same value as the passed-in block Set(other Block) // ToCommonBlock retrieves this block as a *CommonBlock. ToCommonBlock() *CommonBlock } // NodeID is a unique but transient ID for a Node. That is, two Node // objects in memory at the same time represent the same file or // directory if and only if their NodeIDs are equal (by pointer). type NodeID interface { // ParentID returns the NodeID of the directory containing the // pointed-to file or directory, or nil if none exists. ParentID() NodeID } // Node represents a direct pointer to a file or directory in KBFS. // It is somewhat like an inode in a regular file system. Users of // KBFS can use Node as a handle when accessing files or directories // they have previously looked up. type Node interface { // GetID returns the ID of this Node. This should be used as a // map key instead of the Node itself. GetID() NodeID // GetFolderBranch returns the folder ID and branch for this Node. GetFolderBranch() FolderBranch // GetBasename returns the current basename of the node, or "" // if the node has been unlinked. GetBasename() string // Readonly returns true if KBFS should outright reject any write // attempts on data or directory structures of this node. Though // note that even if it returns false, KBFS can reject writes to // the node for other reasons, such as TLF permissions. 
An // implementation that wraps another `Node` (`inner`) must return // `inner.Readonly()` if it decides not to return `true` on its // own. Readonly(ctx context.Context) bool // ShouldCreateMissedLookup is called for Nodes representing // directories, whenever `name` is looked up but is not found in // the directory. If the Node decides a new entry should be // created matching this lookup, it should return `true` as well // as a context to use for the creation, the type of the new entry // and the symbolic link contents if the entry is a Sym; the // caller should then create this entry. Otherwise it should // return false. An implementation that wraps another `Node` // (`inner`) must return `inner.ShouldCreateMissedLookup()` if it // decides not to return `true` on its own. ShouldCreateMissedLookup(ctx context.Context, name string) ( shouldCreate bool, newCtx context.Context, et EntryType, sympath string) // ShouldRetryOnDirRead is called for Nodes representing // directories, whenever a `Lookup` or `GetDirChildren` is done on // them. It should return true to instruct the caller that it // should re-sync its view of the directory and retry the // operation. ShouldRetryOnDirRead(ctx context.Context) bool // WrapChild returns a wrapped version of child, if desired, to // add custom behavior to the child node. An implementation that // wraps another `Node` (`inner`) must first call // `inner.WrapChild(child)` before performing its own wrapping // operation, to ensure that all wrapping is preserved and that it // happens in the correct order. WrapChild(child Node) Node // Unwrap returns the initial, unwrapped Node that was used to // create this Node. Unwrap() Node } // KBFSOps handles all file system operations. Expands all indirect // pointers. Operations that modify the server data change all the // block IDs along the path, and so must return a path with the new // BlockIds so the caller can update their references. // // KBFSOps implementations must guarantee goroutine-safety of calls on // a per-top-level-folder basis. // // There are two types of operations that could block: // * remote-sync operations, that need to synchronously update the // MD for the corresponding top-level folder. When these // operations return successfully, they will have guaranteed to // have successfully written the modification to the KBFS servers. // * remote-access operations, that don't sync any modifications to KBFS // servers, but may block on reading data from the servers. // // KBFSOps implementations are supposed to give git-like consistency // semantics for modification operations; they will be visible to // other clients immediately after the remote-sync operations succeed, // if and only if there was no other intervening modification to the // same folder. If not, the change will be sync'd to the server in a // special per-device "unmerged" area before the operation succeeds. // In this case, the modification will not be visible to other clients // until the KBFS code on this device performs automatic conflict // resolution in the background. // // All methods take a Context (see https://blog.golang.org/context), // and if that context is cancelled during the operation, KBFSOps will // abort any blocking calls and return ctx.Err(). Any notifications // resulting from an operation will also include this ctx (or a // Context derived from it), allowing the caller to determine whether // the notification is a result of their own action or an external // action. 
type KBFSOps interface { // GetFavorites returns the logged-in user's list of favorite // top-level folders. This is a remote-access operation. GetFavorites(ctx context.Context) ([]Favorite, error) // RefreshCachedFavorites tells the instance to forget any cached // favorites list and fetch a new list from the server. The // effects are asynchronous; if there's an error refreshing the // favorites, the cached favorites will become empty. RefreshCachedFavorites(ctx context.Context) // AddFavorite adds the favorite to both the server and // the local cache. AddFavorite(ctx context.Context, fav Favorite) error // DeleteFavorite deletes the favorite from both the server and // the local cache. Idempotent, so it succeeds even if the folder // isn't favorited. DeleteFavorite(ctx context.Context, fav Favorite) error // GetTLFCryptKeys gets crypt keys of all generations as well as // TLF ID for tlfHandle. The returned keys (the keys slice) are ordered by // generation, starting with the key for FirstValidKeyGen. GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) ( keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) // GetTLFID gets the TLF ID for tlfHandle. GetTLFID(ctx context.Context, tlfHandle *TlfHandle) (tlf.ID, error) // GetTLFHandle returns the TLF handle for a given node. GetTLFHandle(ctx context.Context, node Node) (*TlfHandle, error) // GetOrCreateRootNode returns the root node and root entry // info associated with the given TLF handle and branch, if // the logged-in user has read permissions to the top-level // folder. It creates the folder if one doesn't exist yet (and // branch == MasterBranch), and the logged-in user has write // permissions to the top-level folder. This is a // remote-access operation. GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetRootNode is like GetOrCreateRootNode but if the root node // does not exist it will return a nil Node and not create it. GetRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetDirChildren returns a map of children in the directory, // mapped to their EntryInfo, if the logged-in user has read // permission for the top-level folder. This is a remote-access // operation. GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error) // Lookup returns the Node and entry info associated with a // given name in a directory, if the logged-in user has read // permissions to the top-level folder. The returned Node is nil // if the name is a symlink. This is a remote-access operation. Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error) // Stat returns the entry info associated with a // given Node, if the logged-in user has read permissions to the // top-level folder. This is a remote-access operation. Stat(ctx context.Context, node Node) (EntryInfo, error) // CreateDir creates a new subdirectory under the given node, if // the logged-in user has write permission to the top-level // folder. Returns the new Node for the created subdirectory, and // its new entry info. This is a remote-sync operation. CreateDir(ctx context.Context, dir Node, name string) ( Node, EntryInfo, error) // CreateFile creates a new file under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new Node for the created file, and its new // entry info. excl (when implemented) specifies whether this is an exclusive // create.
Semantically setting excl to WithExcl is like O_CREAT|O_EXCL in a // Unix open() call. // // This is a remote-sync operation. CreateFile(ctx context.Context, dir Node, name string, isExec bool, excl Excl) ( Node, EntryInfo, error) // CreateLink creates a new symlink under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new entry info for the created symlink. This // is a remote-sync operation. CreateLink(ctx context.Context, dir Node, fromName string, toPath string) ( EntryInfo, error) // RemoveDir removes the subdirectory represented by the given // node, if the logged-in user has write permission to the // top-level folder. Will return an error if the subdirectory is // not empty. This is a remote-sync operation. RemoveDir(ctx context.Context, dir Node, dirName string) error // RemoveEntry removes the directory entry represented by the // given node, if the logged-in user has write permission to the // top-level folder. This is a remote-sync operation. RemoveEntry(ctx context.Context, dir Node, name string) error // Rename performs an atomic rename operation with a given // top-level folder if the logged-in user has write permission to // that folder, and will return an error if nodes from different // folders are passed in. Also returns an error if the new name // already has an entry corresponding to an existing directory // (only non-dir types may be renamed over). This is a // remote-sync operation. Rename(ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) error // Read fills in the given buffer with data from the file at the // given node starting at the given offset, if the logged-in user // has read permission to the top-level folder. The read data // reflects any outstanding writes and truncates to that file that // have been written through this KBFSOps object, even if those // writes have not yet been sync'd. There is no guarantee that // Read returns all of the requested data; it will return the // number of bytes that it wrote to the dest buffer. Reads on an // unlinked file may or may not succeed, depending on whether or // not the data has been cached locally. If (0, nil) is returned, // that means EOF has been reached. This is a remote-access // operation. Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error) // Write modifies the file at the given node, by writing the given // buffer at the given offset within the file, if the logged-in // user has write permission to the top-level folder. It // overwrites any data already there, and extends the file size as // necessary to accommodate the new data. It guarantees to write // the entire buffer in one operation. Writes on an unlinked file // may or may not succeed as no-ops, depending on whether or not // the necessary blocks have been locally cached. This is a // remote-access operation. Write(ctx context.Context, file Node, data []byte, off int64) error // Truncate modifies the file at the given node, by either // shrinking or extending its size to match the given size, if the // logged-in user has write permission to the top-level folder. // If extending the file, it pads the new data with 0s. Truncates // on an unlinked file may or may not succeed as no-ops, depending // on whether or not the necessary blocks have been locally // cached. This is a remote-access operation.
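//
// (Illustrative sketch, added: a read loop honoring the partial-read semantics described above, assuming `ops KBFSOps` and a file Node `file`.)
//
//	buf := make([]byte, 4096)
//	var off int64
//	for {
//		n, err := ops.Read(ctx, file, buf, off)
//		if err != nil { /* handle error */ }
//		if n == 0 {
//			break // (0, nil) means EOF has been reached
//		}
//		off += n // Read may return fewer bytes than requested
//	}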
Truncate(ctx context.Context, file Node, size uint64) error // SetEx turns on or off the executable bit on the file // represented by a given node, if the logged-in user has write // permissions to the top-level folder. This is a remote-sync // operation. SetEx(ctx context.Context, file Node, ex bool) error // SetMtime sets the modification time on the file represented by // a given node, if the logged-in user has write permissions to // the top-level folder. If mtime is nil, it is a noop. This is // a remote-sync operation. SetMtime(ctx context.Context, file Node, mtime *time.Time) error // SyncAll flushes all outstanding writes and truncates for any // dirty files to the KBFS servers within the given folder, if the // logged-in user has write permissions to the top-level folder. // If done through a file system interface, this may include // modifications done via multiple file handles. This is a // remote-sync operation. SyncAll(ctx context.Context, folderBranch FolderBranch) error // FolderStatus returns the status of a particular folder/branch, along // with a channel that will be closed when the status has been // updated (to eliminate the need for polling this method). FolderStatus(ctx context.Context, folderBranch FolderBranch) ( FolderBranchStatus, <-chan StatusUpdate, error) // Status returns the status of KBFS, along with a channel that will be // closed when the status has been updated (to eliminate the need for // polling this method). Note that this channel only applies to // connection status changes. // // KBFSStatus can be non-empty even if there is an error. Status(ctx context.Context) ( KBFSStatus, <-chan StatusUpdate, error) // UnstageForTesting clears out this device's staged state, if // any, and fast-forwards to the current head of this // folder-branch. UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error // RequestRekey requests to rekey this folder. Note that this asynchronously // requests a rekey, so canceling ctx doesn't cancel the rekey. RequestRekey(ctx context.Context, id tlf.ID) // SyncFromServerForTesting blocks until the local client has // contacted the server and guaranteed that all known updates // for the given top-level folder have been applied locally // (and notifications sent out to any observers). It returns // an error if this folder-branch is currently unmerged or // dirty locally. If lockBeforeGet is non-nil, it blocks on idempotently // taking the lock from server at the time it gets any metadata. SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch, lockBeforeGet *keybase1.LockID) error // GetUpdateHistory returns a complete history of all the merged // updates of the given folder, in a data structure that's // suitable for encoding directly into JSON. This is an expensive // operation, and should only be used for occasional debugging. // Note that the history does not include any unmerged changes or // outstanding writes from the local device. GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) ( history TLFUpdateHistory, err error) // GetEditHistory returns a clustered list of the most recent file // edits by each of the valid writers of the given folder. Users // looking to get updates to this list can register as an observer // for the folder. GetEditHistory(ctx context.Context, folderBranch FolderBranch) ( edits TlfWriterEdits, err error) // GetNodeMetadata gets metadata associated with a Node.
GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error) // Shutdown is called to clean up any resources associated with // this KBFSOps instance. Shutdown(ctx context.Context) error // PushConnectionStatusChange updates the status of a service for // human readable connection status tracking. PushConnectionStatusChange(service string, newStatus error) // PushStatusChange causes Status listeners to be notified via closing // the status channel. PushStatusChange() // ClearPrivateFolderMD clears any cached private folder metadata, // e.g. on a logout. ClearPrivateFolderMD(ctx context.Context) // ForceFastForward forwards the nodes of all folders that have // been previously cleared with `ClearPrivateFolderMD` to their // newest version. It works asynchronously, so no error is // returned. ForceFastForward(ctx context.Context) // TeamNameChanged indicates that a team has changed its name, and // we should clean up any outstanding handle info associated with // the team ID. TeamNameChanged(ctx context.Context, tid keybase1.TeamID) // KickoffAllOutstandingRekeys kicks off all outstanding rekeys. It does // nothing to folders that have not scheduled a rekey. This should be // called when we receive an event of "paper key cached" from service. KickoffAllOutstandingRekeys() error } type merkleRootGetter interface { // GetCurrentMerkleRoot returns the current root of the global // Keybase Merkle tree. GetCurrentMerkleRoot(ctx context.Context) (keybase1.MerkleRootV2, error) } type gitMetadataPutter interface { PutGitMetadata(ctx context.Context, folder keybase1.Folder, repoID keybase1.RepoID, metadata keybase1.GitLocalMetadata) error } // KeybaseService is an interface for communicating with the keybase // service. type KeybaseService interface { merkleRootGetter gitMetadataPutter // Resolve, given an assertion, resolves it to a username/UID // pair. The username <-> UID mapping is trusted and // immutable, so it can be cached. If the assertion is just // the username or a UID assertion, then the resolution can // also be trusted. If the returned pair is equal to that of // the current session, then it can also be // trusted. Otherwise, Identify() needs to be called on the // assertion before the assertion -> (username, UID) mapping // can be trusted. Resolve(ctx context.Context, assertion string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) // Identify, given an assertion, returns a UserInfo struct // with the user that matches that assertion, or an error // otherwise. The reason string is displayed on any tracker // popups spawned. Identify(ctx context.Context, assertion, reason string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) // ResolveIdentifyImplicitTeam resolves, and optionally // identifies, an implicit team. If the implicit team doesn't yet // exist, and doIdentifies is true, one is created. ResolveIdentifyImplicitTeam( ctx context.Context, assertions, suffix string, tlfType tlf.Type, doIdentifies bool, reason string) (ImplicitTeamInfo, error) // ResolveImplicitTeamByID resolves an implicit team to a team // name, given a team ID. ResolveImplicitTeamByID( ctx context.Context, teamID keybase1.TeamID) (string, error) // CreateTeamTLF associates the given TLF ID with the team ID in // the team's sigchain. If the team already has a TLF ID // associated with it, this overwrites it. CreateTeamTLF( ctx context.Context, teamID keybase1.TeamID, tlfID tlf.ID) error // LoadUserPlusKeys returns a UserInfo struct for a // user with the specified UID. 
// If you have the UID for a user and don't require Identify to // validate an assertion or the identity of a user, use this to // get UserInfo structs as it is much cheaper than Identify. // // pollForKID, if non-empty, causes `PollForKID` field to be populated, which // causes the service to poll for the given KID. This is useful during // provisioning where the provisioner needs to get the MD revision that the // provisionee has set the rekey bit on. LoadUserPlusKeys(ctx context.Context, uid keybase1.UID, pollForKID keybase1.KID) (UserInfo, error) // LoadUnverifiedKeys returns a list of unverified public keys. They are the union // of all known public keys associated with the account and the verified // keys currently part of the user's sigchain. LoadUnverifiedKeys(ctx context.Context, uid keybase1.UID) ( []keybase1.PublicKey, error) // LoadTeamPlusKeys returns a TeamInfo struct for a team with the // specified TeamID. The caller can specify `desiredKeyGen` to // force a server check if that particular key gen isn't yet // known; it may be set to UnspecifiedKeyGen if no server check is // required. The caller can specify `desiredUID` and // `desiredRole` to force a server check if that particular UID // isn't a member of the team yet according to local caches; it // may be set to "" if no server check is required. LoadTeamPlusKeys(ctx context.Context, tid keybase1.TeamID, desiredKeyGen kbfsmd.KeyGen, desiredUser keybase1.UserVersion, desiredRole keybase1.TeamRole) (TeamInfo, error) // CurrentSession returns a SessionInfo struct with all the // information for the current session, or an error otherwise. CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error) // FavoriteAdd adds the given folder to the list of favorites. FavoriteAdd(ctx context.Context, folder keybase1.Folder) error // FavoriteDelete removes the given folder from the list of // favorites. FavoriteDelete(ctx context.Context, folder keybase1.Folder) error // FavoriteList returns the current list of favorites. FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error) // Notify sends a filesystem notification. Notify(ctx context.Context, notification *keybase1.FSNotification) error // NotifySyncStatus sends a sync status notification. NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus) error // FlushUserFromLocalCache instructs this layer to clear any // KBFS-side, locally-cached information about the given user. // This does NOT involve communication with the daemon, this is // just to force future calls loading this user to fall through to // the daemon itself, rather than being served from the cache. FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID) // FlushUserUnverifiedKeysFromLocalCache instructs this layer to clear any // KBFS-side, locally-cached unverified keys for the given user. FlushUserUnverifiedKeysFromLocalCache(ctx context.Context, uid keybase1.UID) // TODO: Add CryptoClient methods, too. // EstablishMountDir asks the service for the current mount path // and sets it if not established. EstablishMountDir(ctx context.Context) (string, error) // Shutdown frees any resources associated with this // instance. No other methods may be called after this is // called. Shutdown() } // KeybaseServiceCn defines methods needed to construct KeybaseService // and Crypto implementations.
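//
// An illustrative sketch (added), showing how an init path might use an implementation `cn` of this interface; `config`, `params`, `kbCtx`, and `log` are assumed to be in scope:
//
//	service, err := cn.NewKeybaseService(config, params, kbCtx, log)
//	if err != nil { /* handle error */ }
//	crypto, err := cn.NewCrypto(config, params, kbCtx, log)
//	if err != nil { /* handle error */ }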
type KeybaseServiceCn interface { NewKeybaseService(config Config, params InitParams, ctx Context, log logger.Logger) (KeybaseService, error) NewCrypto(config Config, params InitParams, ctx Context, log logger.Logger) (Crypto, error) } type resolver interface { // Resolve, given an assertion, resolves it to a username/UID // pair. The username <-> UID mapping is trusted and // immutable, so it can be cached. If the assertion is just // the username or a UID assertion, then the resolution can // also be trusted. If the returned pair is equal to that of // the current session, then it can also be // trusted. Otherwise, Identify() needs to be called on the // assertion before the assertion -> (username, UserOrTeamID) mapping // can be trusted. // // TODO: some of the above assumptions on cacheability aren't // right for subteams, which can change their name, so this may // need updating. Resolve(ctx context.Context, assertion string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) // ResolveImplicitTeam resolves the given implicit team. ResolveImplicitTeam( ctx context.Context, assertions, suffix string, tlfType tlf.Type) ( ImplicitTeamInfo, error) // ResolveImplicitTeamByID resolves the given implicit team, given // a team ID. ResolveImplicitTeamByID( ctx context.Context, teamID keybase1.TeamID, tlfType tlf.Type) ( ImplicitTeamInfo, error) } type identifier interface { // Identify resolves an assertion (which could also be a // username) to a UserInfo struct, spawning tracker popups if // necessary. The reason string is displayed on any tracker // popups spawned. Identify(ctx context.Context, assertion, reason string) ( libkb.NormalizedUsername, keybase1.UserOrTeamID, error) // IdentifyImplicitTeam identifies (and creates if necessary) the // given implicit team. IdentifyImplicitTeam( ctx context.Context, assertions, suffix string, tlfType tlf.Type, reason string) (ImplicitTeamInfo, error) } type normalizedUsernameGetter interface { // GetNormalizedUsername returns the normalized username // corresponding to the given UID. GetNormalizedUsername(ctx context.Context, id keybase1.UserOrTeamID) ( libkb.NormalizedUsername, error) } // CurrentSessionGetter is an interface for objects that can return // session info. type CurrentSessionGetter interface { // GetCurrentSession gets the current session info. GetCurrentSession(ctx context.Context) (SessionInfo, error) } // teamMembershipChecker is a copy of kbfsmd.TeamMembershipChecker for // embedding in KBPKI. Unfortunately, this is necessary since mockgen // can't handle embedded interfaces living in other packages. type teamMembershipChecker interface { // IsTeamWriter is a copy of // kbfsmd.TeamMembershipChecker.IsTeamWriter. IsTeamWriter(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) (bool, error) // IsTeamReader is a copy of // kbfsmd.TeamMembershipChecker.IsTeamReader. IsTeamReader(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID) ( bool, error) } type teamKeysGetter interface { // GetTeamTLFCryptKeys gets all of a team's secret crypt keys, by // generation, as well as the latest key generation number for the // team. The caller can specify `desiredKeyGen` to force a server // check if that particular key gen isn't yet known; it may be set // to UnspecifiedKeyGen if no server check is required.
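//
// (Illustrative sketch, added: fetching team keys without forcing a server check, assuming a teamKeysGetter `getter` and a teamID; `kbfsmd.UnspecifiedKeyGen` is assumed to be the constant the doc above refers to.)
//
//	keys, latestKeyGen, err := getter.GetTeamTLFCryptKeys(
//		ctx, teamID, kbfsmd.UnspecifiedKeyGen)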
GetTeamTLFCryptKeys(ctx context.Context, tid keybase1.TeamID, desiredKeyGen kbfsmd.KeyGen) ( map[kbfsmd.KeyGen]kbfscrypto.TLFCryptKey, kbfsmd.KeyGen, error) } type teamRootIDGetter interface { // GetTeamRootID returns the root team ID for the given (sub)team // ID. GetTeamRootID(ctx context.Context, tid keybase1.TeamID) ( keybase1.TeamID, error) } // KBPKI interacts with the Keybase daemon to fetch user info. type KBPKI interface { CurrentSessionGetter resolver identifier normalizedUsernameGetter merkleRootGetter teamMembershipChecker teamKeysGetter teamRootIDGetter gitMetadataPutter // HasVerifyingKey returns nil if the given user has the given // VerifyingKey, and an error otherwise. HasVerifyingKey(ctx context.Context, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey, atServerTime time.Time) error // HasUnverifiedVerifyingKey returns nil if the given user has the given // unverified VerifyingKey, and an error otherwise. Note that any match // is with a key not verified to be currently connected to the user via // their sigchain. This is currently only used to verify finalized or // reset TLFs. Further note that unverified keys are a superset of // verified keys. HasUnverifiedVerifyingKey(ctx context.Context, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) error // GetCryptPublicKeys gets all of a user's crypt public keys (including // paper keys). GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) ( []kbfscrypto.CryptPublicKey, error) // TODO: Split the methods below off into a separate // FavoriteOps interface. // FavoriteAdd adds folder to the list of the logged in user's // favorite folders. It is idempotent. FavoriteAdd(ctx context.Context, folder keybase1.Folder) error // FavoriteDelete deletes folder from the list of the logged in user's // favorite folders. It is idempotent. FavoriteDelete(ctx context.Context, folder keybase1.Folder) error // FavoriteList returns the list of all favorite folders for // the logged in user. FavoriteList(ctx context.Context) ([]keybase1.Folder, error) // CreateTeamTLF associates the given TLF ID with the team ID in // the team's sigchain. If the team already has a TLF ID // associated with it, this overwrites it. CreateTeamTLF( ctx context.Context, teamID keybase1.TeamID, tlfID tlf.ID) error // Notify sends a filesystem notification. Notify(ctx context.Context, notification *keybase1.FSNotification) error } // KeyMetadata is an interface for something that holds key // information. This is usually implemented by RootMetadata. type KeyMetadata interface { // TlfID returns the ID of the TLF for which this object holds // key info. TlfID() tlf.ID // TypeForKeying returns the keying type for this MD. TypeForKeying() tlf.KeyingType // LatestKeyGeneration returns the most recent key generation // with key data in this object, or PublicKeyGen if this TLF // is public. LatestKeyGeneration() kbfsmd.KeyGen // GetTlfHandle returns the handle for the TLF. It must not // return nil. // // TODO: Remove the need for this function in this interface, // so that kbfsmd.RootMetadata can implement this interface // fully. GetTlfHandle() *TlfHandle // IsWriter checks that the given user is a valid writer of the TLF // right now. IsWriter( ctx context.Context, checker kbfsmd.TeamMembershipChecker, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) ( bool, error) // HasKeyForUser returns whether or not the given user has // keys for at least one device. Returns an error if the TLF // is public.
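//
// (Illustrative sketch, added, assuming a KeyMetadata `kmd` and a keybase1.UID `uid`.)
//
//	if ok, err := kmd.HasKeyForUser(uid); err == nil && !ok {
//		// uid has no device keys for this TLF yet; a rekey may be needed
//	}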
HasKeyForUser(user keybase1.UID) (bool, error) // GetTLFCryptKeyParams returns all the necessary info to // construct the TLF crypt key for the given key generation, // user, and device (identified by its crypt public key), or // false if not found. This returns an error if the TLF is // public. GetTLFCryptKeyParams( keyGen kbfsmd.KeyGen, user keybase1.UID, key kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFEphemeralPublicKey, kbfscrypto.EncryptedTLFCryptKeyClientHalf, kbfscrypto.TLFCryptKeyServerHalfID, bool, error) // StoresHistoricTLFCryptKeys returns whether or not history keys are // symmetrically encrypted; if not, they're encrypted per-device. StoresHistoricTLFCryptKeys() bool // GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given // generation using the current generation's TLFCryptKey. GetHistoricTLFCryptKey(codec kbfscodec.Codec, keyGen kbfsmd.KeyGen, currentKey kbfscrypto.TLFCryptKey) ( kbfscrypto.TLFCryptKey, error) } type encryptionKeyGetter interface { // GetTLFCryptKeyForEncryption gets the crypt key to use for // encryption (i.e., with the latest key generation) for the // TLF with the given metadata. GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) ( kbfscrypto.TLFCryptKey, error) } type mdDecryptionKeyGetter interface { // GetTLFCryptKeyForMDDecryption gets the crypt key to use for the // TLF with the given metadata to decrypt the private portion of // the metadata. It finds the appropriate key from mdWithKeys // (which in most cases is the same as mdToDecrypt) if it's not // already cached. GetTLFCryptKeyForMDDecryption(ctx context.Context, kmdToDecrypt, kmdWithKeys KeyMetadata) ( kbfscrypto.TLFCryptKey, error) } type blockDecryptionKeyGetter interface { // GetTLFCryptKeyForBlockDecryption gets the crypt key to use // for the TLF with the given metadata to decrypt the block // pointed to by the given pointer. GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error) } type blockKeyGetter interface { encryptionKeyGetter blockDecryptionKeyGetter } // KeyManager fetches and constructs the keys needed for KBFS file // operations. type KeyManager interface { blockKeyGetter mdDecryptionKeyGetter // GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations // for current devices. keys contains crypt keys from all generations, in // order, starting from FirstValidKeyGen. GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd KeyMetadata) ( keys []kbfscrypto.TLFCryptKey, err error) // Rekey checks the given MD object, if it is a private TLF, // against the current set of device keys for all valid // readers and writers. If there are any new devices, it // updates all existing key generations to include the new // devices. If there are devices that have been removed, it // creates a new epoch of keys for the TLF. If there was an // error, or the RootMetadata wasn't changed, it returns false. // Otherwise, it returns true. If a new key generation is // added the second return value points to this new key. This // is to allow for caching of the TLF crypt key only after a // successful merged write of the metadata. Otherwise we could // prematurely pollute the key cache. // // If the given MD object is a public TLF, it simply updates // the TLF's handle with any newly-resolved writers. // // If promptPaper is set, prompts for any unlocked paper keys. // promptPaper shouldn't be set if md is for a public TLF. 
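//
// (Illustrative sketch, added: honoring the caching contract described above, assuming a KeyManager `km` and a *RootMetadata `md`.)
//
//	changed, newKey, err := km.Rekey(ctx, md, false)
//	if err == nil && changed && newKey != nil {
//		// Write md to the server first; only cache *newKey after the
//		// merged write succeeds, to avoid polluting the key cache.
//	}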
Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) ( bool, *kbfscrypto.TLFCryptKey, error) } // Reporter exports events (asynchronously) to any number of sinks. type Reporter interface { // ReportErr records that a given error happened. ReportErr(ctx context.Context, tlfName tlf.CanonicalName, t tlf.Type, mode ErrorModeType, err error) // AllKnownErrors returns all errors known to this Reporter. AllKnownErrors() []ReportedError // Notify sends the given notification to any sink. Notify(ctx context.Context, notification *keybase1.FSNotification) // NotifySyncStatus sends the given path sync status to any sink. NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus) // Shutdown frees any resources allocated by a Reporter. Shutdown() } // MDCache gets and puts plaintext top-level metadata into the cache. type MDCache interface { // Get gets the metadata object associated with the given TLF ID, // revision number, and branch ID (kbfsmd.NullBranchID for merged MD). Get(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID) (ImmutableRootMetadata, error) // Put stores the metadata object, only if an MD matching that TLF // ID, revision number, and branch ID isn't already cached. If // there is already a matching item in the cache, we require that // the caller manage the cache explicitly by deleting or replacing // the entry. This should be used when putting existing MDs // being fetched from the server. Put(md ImmutableRootMetadata) error // Delete removes the given metadata object from the cache if it exists. Delete(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID) // Replace replaces the entry matching the md under the old branch // ID with the new one. If the old entry doesn't exist, this is // equivalent to a Put, except that it overrides anything else // that's already in the cache. This should be used when putting // new MDs created locally. Replace(newRmd ImmutableRootMetadata, oldBID kbfsmd.BranchID) error // MarkPutToServer sets `PutToServer` to true for the specified // MD, if it already exists in the cache. MarkPutToServer(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID) // GetIDForHandle retrieves a cached, trusted TLF ID for the given // handle, if one exists. GetIDForHandle(handle *TlfHandle) (tlf.ID, error) // PutIDForHandle caches a trusted TLF ID for the given handle. PutIDForHandle(handle *TlfHandle, id tlf.ID) error // ChangeHandleForID moves an ID to be under a new handle, if the // ID is cached already. ChangeHandleForID(oldHandle *TlfHandle, newHandle *TlfHandle) } // KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys. type KeyCache interface { // GetTLFCryptKey gets the crypt key for the given TLF. GetTLFCryptKey(tlf.ID, kbfsmd.KeyGen) (kbfscrypto.TLFCryptKey, error) // PutTLFCryptKey stores the crypt key for the given TLF. PutTLFCryptKey(tlf.ID, kbfsmd.KeyGen, kbfscrypto.TLFCryptKey) error } // BlockCacheLifetime denotes the lifetime of an entry in BlockCache. type BlockCacheLifetime int func (l BlockCacheLifetime) String() string { switch l { case NoCacheEntry: return "NoCacheEntry" case TransientEntry: return "TransientEntry" case PermanentEntry: return "PermanentEntry" } return "Unknown" } const ( // NoCacheEntry means that the entry will not be cached. NoCacheEntry BlockCacheLifetime = iota // TransientEntry means that the cache entry may be evicted at // any time. TransientEntry // PermanentEntry means that the cache entry must remain until // explicitly removed from the cache.
PermanentEntry ) // BlockCacheSimple gets and puts plaintext dir blocks and file blocks into // a cache. These blocks are immutable and identified by their // content hash. type BlockCacheSimple interface { // Get gets the block associated with the given block ID. Get(ptr BlockPointer) (Block, error) // Put stores the final (content-addressable) block associated // with the given block ID. If lifetime is TransientEntry, // then it is assumed that the block exists on the server and // the entry may be evicted from the cache at any time. If // lifetime is PermanentEntry, then it is assumed that the // block doesn't exist on the server and must remain in the // cache until explicitly removed. As an intermediary state, // as when a block is being sent to the server, the block may // be put into the cache both with TransientEntry and // PermanentEntry -- these are two separate entries. This is // fine, since the block should be the same. Put(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) error } // BlockCache specifies the interface of BlockCacheSimple, and also more // advanced and internal methods. type BlockCache interface { BlockCacheSimple // CheckForKnownPtr sees whether this cache has a transient // entry for the given file block, which must be a direct file // block containing data. Returns the full BlockPointer // associated with that ID, including key and data versions. // If no ID is known, return an uninitialized BlockPointer and // a nil error. CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (BlockPointer, error) // DeleteTransient removes the transient entry for the given // pointer from the cache, as well as any cached IDs so the block // won't be reused. DeleteTransient(ptr BlockPointer, tlf tlf.ID) error // DeletePermanent removes the permanent entry for the non-dirty block // associated with the given block ID from the cache. No // error is returned if no block exists for the given ID. DeletePermanent(id kbfsblock.ID) error // DeleteKnownPtr removes the cached ID for the given file // block. It does not remove the block itself. DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error // GetWithPrefetch retrieves a block from the cache, along with the block's // prefetch status. GetWithPrefetch(ptr BlockPointer) (block Block, prefetchStatus PrefetchStatus, lifetime BlockCacheLifetime, err error) // PutWithPrefetch puts a block into the cache, along with whether or not // it has triggered or finished a prefetch. PutWithPrefetch(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime, prefetchStatus PrefetchStatus) error // SetCleanBytesCapacity atomically sets clean bytes capacity for block // cache. SetCleanBytesCapacity(capacity uint64) // GetCleanBytesCapacity atomically gets clean bytes capacity for block // cache. GetCleanBytesCapacity() (capacity uint64) } // DirtyPermChan is a channel that gets closed when the holder has // permission to write. We are forced to define it as a type due to a // bug in mockgen that can't handle return values with a chan // struct{}. type DirtyPermChan <-chan struct{} // DirtyBlockCache gets and puts plaintext dir blocks and file blocks // into a cache, which have been modified by the application and not // yet committed on the KBFS servers. They are identified by a // (potentially random) ID that may not have any relationship with // their context, along with a Branch in case the same TLF is being // modified via multiple branches. Dirty blocks are never evicted, // they must be deleted explicitly.
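//
// An illustrative sketch (added) of the permission flow documented on RequestPermissionToDirty below, assuming a DirtyBlockCache `dbc`, a tlf.ID `tlfID`, and an estimate `estimatedBytes`:
//
//	ch, err := dbc.RequestPermissionToDirty(ctx, tlfID, estimatedBytes)
//	if err != nil { /* handle error */ }
//	select {
//	case <-ch: // the cache can accept more dirty data
//	case <-ctx.Done(): /* handle cancellation */
//	}
//	// ... dirty the blocks, reporting exact sizes as they become known ...
//	dbc.UpdateUnsyncedBytes(tlfID, exactBytes, false)
//	// ... and finally retire the original estimate:
//	dbc.UpdateUnsyncedBytes(tlfID, -estimatedBytes, false)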
type DirtyBlockCache interface { // Get gets the block associated with the given block ID. Returns // the dirty block for the given ID, if one exists. Get(tlfID tlf.ID, ptr BlockPointer, branch BranchName) (Block, error) // Put stores a dirty block currently identified by the // given block pointer and branch name. Put(tlfID tlf.ID, ptr BlockPointer, branch BranchName, block Block) error // Delete removes the dirty block associated with the given block // pointer and branch from the cache. No error is returned if no // block exists for the given ID. Delete(tlfID tlf.ID, ptr BlockPointer, branch BranchName) error // IsDirty states whether or not the block associated with the // given block pointer and branch name is dirty in this cache. IsDirty(tlfID tlf.ID, ptr BlockPointer, branch BranchName) bool // IsAnyDirty returns whether there are any dirty blocks in the // cache. tlfID may be ignored. IsAnyDirty(tlfID tlf.ID) bool // RequestPermissionToDirty is called whenever a user wants to // write data to a file. The caller provides an estimated number // of bytes that will become dirty -- this is difficult to know // exactly without pre-fetching all the blocks involved, but in // practice we can just use the number of bytes sent in via the // Write. It returns a channel that blocks until the cache is // ready to receive more dirty data, at which point the channel is // closed. The user must call // `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has // completed its write and called `UpdateUnsyncedBytes` for all // the exact dirty block sizes. RequestPermissionToDirty(ctx context.Context, tlfID tlf.ID, estimatedDirtyBytes int64) (DirtyPermChan, error) // UpdateUnsyncedBytes is called by a user, who has already been // granted permission to write, with the delta in block sizes that // were dirtied as part of the write. So for example, if a // newly-dirtied block of 20 bytes was extended by 5 bytes, they // should send 25. If on the next write (before any syncs), bytes // 10-15 of that same block were overwritten, they should send 0 // over the channel because there were no new bytes. If an // already-dirtied block is truncated, or if previously requested // bytes have now been updated more accurately in previous // requests, newUnsyncedBytes may be negative. wasSyncing should // be true if `BlockSyncStarted` has already been called for this // block. UpdateUnsyncedBytes(tlfID tlf.ID, newUnsyncedBytes int64, wasSyncing bool) // UpdateSyncingBytes is called when a particular block has // started syncing, or with a negative number when a block is no // longer syncing due to an error (and BlockSyncFinished will // never be called). UpdateSyncingBytes(tlfID tlf.ID, size int64) // BlockSyncFinished is called when a particular block has // finished syncing, though the overall sync might not yet be // complete. This lets the cache know it might be able to grant // more permission to writers. BlockSyncFinished(tlfID tlf.ID, size int64) // SyncFinished is called when a complete sync has completed and // its dirty blocks have been removed from the cache. This lets // the cache know it might be able to grant more permission to // writers. SyncFinished(tlfID tlf.ID, size int64) // ShouldForceSync returns true if the sync buffer is full enough // to force all callers to sync their data immediately. ShouldForceSync(tlfID tlf.ID) bool // Shutdown frees any resources associated with this instance. It // returns an error if there are any unsynced blocks. 
Shutdown() error } // DiskBlockCache caches blocks to the disk. type DiskBlockCache interface { // Get gets a block from the disk cache. Get(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID) ( buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, prefetchStatus PrefetchStatus, err error) // Put puts a block to the disk cache. Returns after it has updated the // metadata but before it has finished writing the block. Put(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error // Delete deletes some blocks from the disk cache. Delete(ctx context.Context, blockIDs []kbfsblock.ID) (numRemoved int, sizeRemoved int64, err error) // UpdateMetadata updates metadata for a given block in the disk cache. UpdateMetadata(ctx context.Context, blockID kbfsblock.ID, prefetchStatus PrefetchStatus) error // Status returns the current status of the disk cache. Status(ctx context.Context) map[string]DiskBlockCacheStatus // Shutdown cleanly shuts down the disk block cache. Shutdown(ctx context.Context) } // cryptoPure contains all methods of Crypto that don't depend on // implicit state, i.e. they're pure functions of the input. type cryptoPure interface { // MakeRandomTlfID generates a dir ID using a CSPRNG. MakeRandomTlfID(t tlf.Type) (tlf.ID, error) // MakeRandomBranchID generates a per-device branch ID using a // CSPRNG. It will not return LocalSquashBranchID or // kbfsmd.NullBranchID. MakeRandomBranchID() (kbfsmd.BranchID, error) // MakeTemporaryBlockID generates a temporary block ID using a // CSPRNG. This is used for indirect blocks before they're // committed to the server. MakeTemporaryBlockID() (kbfsblock.ID, error) // MakeBlockRefNonce generates a block reference nonce using a // CSPRNG. This is used for distinguishing different references to // the same BlockID. MakeBlockRefNonce() (kbfsblock.RefNonce, error) // MakeRandomTLFEphemeralKeys generates ephemeral keys using a // CSPRNG for a TLF. These keys can then be used to key/rekey // the TLF. MakeRandomTLFEphemeralKeys() (kbfscrypto.TLFEphemeralPublicKey, kbfscrypto.TLFEphemeralPrivateKey, error) // MakeRandomTLFKeys generates keys using a CSPRNG for a // single key generation of a TLF. MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey, kbfscrypto.TLFPrivateKey, kbfscrypto.TLFCryptKey, error) // MakeRandomBlockCryptKeyServerHalf generates the server-side of // a block crypt key. MakeRandomBlockCryptKeyServerHalf() ( kbfscrypto.BlockCryptKeyServerHalf, error) // EncryptPrivateMetadata encrypts a PrivateMetadata object. EncryptPrivateMetadata( pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) ( kbfscrypto.EncryptedPrivateMetadata, error) // DecryptPrivateMetadata decrypts a PrivateMetadata object. DecryptPrivateMetadata( encryptedPMD kbfscrypto.EncryptedPrivateMetadata, key kbfscrypto.TLFCryptKey) (PrivateMetadata, error) // EncryptBlock encrypts a block. plainSize is the size of the encoded // block; EncryptBlock() must guarantee that plainSize <= // len(encryptedBlock). EncryptBlock(block Block, key kbfscrypto.BlockCryptKey) ( plainSize int, encryptedBlock kbfscrypto.EncryptedBlock, err error) // DecryptBlock decrypts a block. Similar to EncryptBlock(), // DecryptBlock() must guarantee that (size of the decrypted // block) <= len(encryptedBlock). DecryptBlock(encryptedBlock kbfscrypto.EncryptedBlock, key kbfscrypto.BlockCryptKey, block Block) error } // Crypto signs, verifies, encrypts, and decrypts stuff.
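//
// (Illustrative sketch, added: an encrypt/decrypt round trip with the cryptoPure methods above, assuming `cp cryptoPure`, a Block `block`, and a kbfscrypto.BlockCryptKey `key`.)
//
//	plainSize, encBlock, err := cp.EncryptBlock(block, key)
//	if err != nil { /* handle error */ }
//	decrypted := block.NewEmpty() // same concrete type as block
//	err = cp.DecryptBlock(encBlock, key, decrypted)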
type Crypto interface { cryptoPure // Duplicate kbfscrypto.Signer here to work around gomock's // limitations. Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error) SignForKBFS(context.Context, []byte) (kbfscrypto.SignatureInfo, error) SignToString(context.Context, []byte) (string, error) // DecryptTLFCryptKeyClientHalf decrypts a // kbfscrypto.TLFCryptKeyClientHalf using the current device's // private key and the TLF's ephemeral public key. DecryptTLFCryptKeyClientHalf(ctx context.Context, publicKey kbfscrypto.TLFEphemeralPublicKey, encryptedClientHalf kbfscrypto.EncryptedTLFCryptKeyClientHalf) ( kbfscrypto.TLFCryptKeyClientHalf, error) // DecryptTLFCryptKeyClientHalfAny decrypts one of the // kbfscrypto.TLFCryptKeyClientHalf using the available // private keys and the ephemeral public key. If promptPaper // is true, the service will prompt the user for any unlocked // paper keys. DecryptTLFCryptKeyClientHalfAny(ctx context.Context, keys []EncryptedTLFCryptKeyClientAndEphemeral, promptPaper bool) ( kbfscrypto.TLFCryptKeyClientHalf, int, error) // Shutdown frees any resources associated with this instance. Shutdown() } type tlfIDGetter interface { // GetIDForHandle returns the tlf.ID associated with the given // handle, if the logged-in user has read permission on the // folder. It may or may not create the folder if it doesn't // exist yet, and it may return `tlf.NullID` with a `nil` error if // it doesn't create a missing folder. GetIDForHandle(ctx context.Context, handle *TlfHandle) (tlf.ID, error) } // MDOps gets and puts root metadata to an MDServer. On a get, it // verifies the metadata is signed by the metadata's signing key. type MDOps interface { tlfIDGetter // GetForTLF returns the current metadata object // corresponding to the given top-level folder, if the logged-in // user has read permission on the folder. // // If lockBeforeGet is not nil, it causes mdserver to take the lock on the // lock ID before the get. GetForTLF(ctx context.Context, id tlf.ID, lockBeforeGet *keybase1.LockID) ( ImmutableRootMetadata, error) // GetUnmergedForTLF is the same as the above but for unmerged // metadata. GetUnmergedForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) ( ImmutableRootMetadata, error) // GetRange returns a range of metadata objects corresponding to // the passed revision numbers (inclusive). // // If lockBeforeGet is not nil, it causes mdserver to take the lock on the // lock ID before the get. GetRange(ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision, lockID *keybase1.LockID) ([]ImmutableRootMetadata, error) // GetUnmergedRange is the same as the above but for unmerged // metadata history (inclusive). GetUnmergedRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, start, stop kbfsmd.Revision) ([]ImmutableRootMetadata, error) // Put stores the metadata object for the given top-level folder. // This also adds the resulting ImmutableRootMetadata object to // the mdcache, if the Put is successful. Note that constructing // the ImmutableRootMetadata requires knowing the verifying key, // which might not be the same as the local user's verifying key // if the MD has been copied from a previous update. // // If lockContext is not nil, it causes the mdserver to check a lockID at // the time of the put, and optionally (if specified in lockContext) // releases the lock on the lock ID if the put is successful. Releasing the // lock in mdserver is idempotent. Note that journalMDOps doesn't support // lockContext for now. 
If journaling is enabled, use FinishSingleOp to // require locks. // // The priority parameter specifies the priority of this particular MD put // operation. When conflict happens, mdserver tries to prioritize writes // with higher priorities. Caller should use pre-defined (or define new) // constants in keybase1 package, such as keybase1.MDPriorityNormal. Note // that journalMDOps doesn't support any priority other than // MDPriorityNormal for now. If journaling is enabled, use FinishSingleOp // to override priority. Put(ctx context.Context, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey, lockContext *keybase1.LockContext, priority keybase1.MDPriority) ( ImmutableRootMetadata, error) // PutUnmerged is the same as the above but for unmerged metadata // history. This also adds the resulting ImmutableRootMetadata // object to the mdcache, if the PutUnmerged is successful. Note // that constructing the ImmutableRootMetadata requires knowing // the verifying key, which might not be the same as the local // user's verifying key if the MD has been copied from a previous // update. PutUnmerged(ctx context.Context, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error) // PruneBranch prunes all unmerged history for the given TLF // branch. PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error // ResolveBranch prunes all unmerged history for the given TLF // branch, and also deletes any blocks in `blocksToDelete` that // are still in the local journal. In addition, it appends the // given MD to the journal. This also adds the resulting // ImmutableRootMetadata object to the mdcache, if the // ResolveBranch is successful. Note that constructing the // ImmutableRootMetadata requires knowing the verifying key, which // might not be the same as the local user's verifying key if the // MD has been copied from a previous update. ResolveBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error) // GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF, // which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it // entered into a conflicting state. GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error) } // KeyOps fetches server-side key halves from the key server. type KeyOps interface { // GetTLFCryptKeyServerHalf gets a server-side key half for a // device given the key half ID. GetTLFCryptKeyServerHalf(ctx context.Context, serverHalfID kbfscrypto.TLFCryptKeyServerHalfID, cryptPublicKey kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFCryptKeyServerHalf, error) // PutTLFCryptKeyServerHalves stores server-side key halves for a // set of users and devices. PutTLFCryptKeyServerHalves(ctx context.Context, keyServerHalves kbfsmd.UserDeviceKeyServerHalves) error // DeleteTLFCryptKeyServerHalf deletes a server-side key half for a // device given the key half ID. DeleteTLFCryptKeyServerHalf(ctx context.Context, uid keybase1.UID, key kbfscrypto.CryptPublicKey, serverHalfID kbfscrypto.TLFCryptKeyServerHalfID) error } // Prefetcher is an interface to a block prefetcher. type Prefetcher interface { // ProcessBlockForPrefetch potentially triggers and monitors a prefetch.
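//
// (Illustrative sketch, added: handing a freshly fetched block to the prefetcher; `onDemandPriority` and `NoPrefetch` are assumed names for a request priority and an initial PrefetchStatus, not confirmed by this file.)
//
//	pre.ProcessBlockForPrefetch(ctx, ptr, block, kmd,
//		onDemandPriority, TransientEntry, NoPrefetch)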
ProcessBlockForPrefetch(ctx context.Context, ptr BlockPointer, block Block, kmd KeyMetadata, priority int, lifetime BlockCacheLifetime, prefetchStatus PrefetchStatus) // CancelPrefetch notifies the prefetcher that a prefetch should be // canceled. CancelPrefetch(kbfsblock.ID) // Shutdown shuts down the prefetcher idempotently. Future calls to // the various Prefetch* methods will return io.EOF. The returned channel // allows upstream components to block until all pending prefetches are // complete. This feature is mainly used for testing, but also to toggle // the prefetcher on and off. Shutdown() <-chan struct{} } // BlockOps gets and puts data blocks to a BlockServer. It performs // the necessary crypto operations on each block. type BlockOps interface { blockRetrieverGetter // Get gets the block associated with the given block pointer // (which belongs to the TLF with the given key metadata), // decrypts it if necessary, and fills in the provided block // object with its contents, if the logged-in user has read // permission for that block. cacheLifetime controls the behavior of the // write-through cache once a Get completes. Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer, block Block, cacheLifetime BlockCacheLifetime) error // GetEncodedSize gets the encoded size of the block associated // with the given block pointer (which belongs to the TLF with the // given key metadata). GetEncodedSize(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer) (uint32, error) // Ready turns the given block (which belongs to the TLF with // the given key metadata) into encoded (and encrypted) data, // and calculates its ID and size, so that we can do a bunch // of block puts in parallel for every write. Ready() must // guarantee that plainSize <= readyBlockData.QuotaSize(). Ready(ctx context.Context, kmd KeyMetadata, block Block) ( id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData, err error) // Delete instructs the server to delete the given block references. // It returns the number of not-yet-deleted references to // each block. Delete(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) ( liveCounts map[kbfsblock.ID]int, err error) // Archive instructs the server to mark the given block references // as "archived"; that is, they are not being used in the current // view of the folder, and shouldn't be served to anyone other // than folder writers. Archive(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) error // TogglePrefetcher activates or deactivates the prefetcher. TogglePrefetcher(enable bool) <-chan struct{} // Prefetcher retrieves this BlockOps' Prefetcher. Prefetcher() Prefetcher // Shutdown shuts down all the workers performing Get operations. Shutdown() } // Duplicate kbfscrypto.AuthTokenRefreshHandler here to work around // gomock's limitations. type authTokenRefreshHandler interface { RefreshAuthToken(context.Context) } // MDServer gets and puts metadata for each top-level directory. The // instantiation should be able to fetch session/user details via KBPKI. On a // put, the server is responsible for 1) ensuring the user has appropriate // permissions for whatever modifications were made; 2) ensuring that // LastModifyingWriter and LastModifyingUser are updated appropriately; and 3) // detecting conflicting writes based on the previous root block ID (i.e., when // it supports strict consistency). On a get, it verifies the logged-in user // has read permissions.
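//
// As an illustrative sketch (added), the re-registration loop implied by RegisterForUpdate below might look like the following, for an MDServer `mds`:
//
//	for {
//		ch, err := mds.RegisterForUpdate(ctx, id, currHead)
//		if err != nil { /* handle error */ }
//		if err := <-ch; err == nil {
//			// A new merged MD is ready; fetch it and advance currHead.
//		}
//		// Either way, the registration is spent; loop to re-register.
//	}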
// // TODO: Add interface for searching by time type MDServer interface { authTokenRefreshHandler // GetForHandle returns the current (signed/encrypted) metadata // object corresponding to the given top-level folder's handle, if // the logged-in user has read permission on the folder. It // creates the folder if one doesn't exist yet, and the logged-in // user has permission to do so. // // If lockBeforeGet is not nil, it takes a lock on the lock ID before // trying to get anything. If taking the lock fails, an error is returned. // Note that taking a lock from the mdserver is idempotent. // // If there is no returned error, then the returned ID must // always be non-null. A nil *RootMetadataSigned may be // returned, but if it is non-nil, then its ID must match the // returned ID. GetForHandle(ctx context.Context, handle tlf.Handle, mStatus kbfsmd.MergeStatus, lockBeforeGet *keybase1.LockID) ( tlf.ID, *RootMetadataSigned, error) // GetForTLF returns the current (signed/encrypted) metadata object // corresponding to the given top-level folder, if the logged-in // user has read permission on the folder. // // If lockBeforeGet is not nil, it takes a lock on the lock ID before // trying to get anything. If taking the lock fails, an error is returned. // Note that taking a lock from the mdserver is idempotent. GetForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus, lockBeforeGet *keybase1.LockID) (*RootMetadataSigned, error) // GetRange returns a range of (signed/encrypted) metadata objects // corresponding to the passed revision numbers (inclusive). // // If lockBeforeGet is not nil, it takes a lock on the lock ID before // trying to get anything. If taking the lock fails, an error is returned. // Note that taking a lock from the mdserver is idempotent. GetRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus, start, stop kbfsmd.Revision, lockBeforeGet *keybase1.LockID) ( []*RootMetadataSigned, error) // Put stores the (signed/encrypted) metadata object for the given // top-level folder. Note: If the unmerged bit is set in the metadata // block's flags bitmask it will be appended to the unmerged per-device // history. // // If lockContext is not nil, it causes the mdserver to check a lockID at // the time of the put, and optionally (if specified in lockContext) // releases the lock on the lock ID if the put is successful. Releasing the // lock in mdserver is idempotent. Put(ctx context.Context, rmds *RootMetadataSigned, extra kbfsmd.ExtraMetadata, lockContext *keybase1.LockContext, priority keybase1.MDPriority) error // Lock ensures lockID for tlfID is taken by this session, i.e., // idempotently take the lock. If the lock is already taken by *another* // session, mdserver returns a throttle error, causing the RPC layer at the // client to retry. So the caller of this method should observe a behavior // similar to a blocking call, which, upon successful return, makes sure // the lock is taken on the server. Note that the lock expires after a // certain time, so it's important to make writes contingent on the lock by // requiring the lockID in Put. Lock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error // ReleaseLock ensures lockID for tlfID is not taken by this session, i.e., // idempotently release the lock. If the lock is already released or // expired, this is a no-op.
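//
// (Illustrative sketch, added: making a write contingent on a lock, as the Lock documentation above describes, assuming `mds MDServer`.)
//
//	if err := mds.Lock(ctx, tlfID, lockID); err != nil { /* handle error */ }
//	// Pass lockID in the Put's lockContext so the write succeeds only
//	// while the lock is still held, then release it afterwards.
//	defer mds.ReleaseLock(ctx, tlfID, lockID)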
ReleaseLock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error // StartImplicitTeamMigration tells mdserver to put an implicit team // migration lock on id, which prevents any rekey MD writes from going // in. Normal classic MD updates can still happen after implicit team // migration has started, until an iTeam-style MD is written. StartImplicitTeamMigration(ctx context.Context, id tlf.ID) (err error) // PruneBranch prunes all unmerged history for the given TLF branch. PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error // RegisterForUpdate tells the MD server to inform the caller when // there is a merged update with a revision number greater than // currHead, which did NOT originate from this same MD server // session. This method returns a chan which can receive only a // single error before it's closed. If the received err is nil, // then there is updated MD ready to fetch which didn't originate // locally; if it is non-nil, then the previous registration // cannot send the next notification (e.g., the connection to the // MD server may have failed). In either case, the caller must // re-register to get a new chan that can receive future update // notifications. RegisterForUpdate(ctx context.Context, id tlf.ID, currHead kbfsmd.Revision) (<-chan error, error) // CancelRegistration lets the local MDServer instance know that // we are no longer interested in updates for the specified // folder. It does not necessarily forward this cancellation to // remote servers. CancelRegistration(ctx context.Context, id tlf.ID) // CheckForRekeys initiates the rekey checking process on the // server. The server is allowed to delay this request, and so it // returns a channel for returning the error. Actual rekey // requests are expected to come in asynchronously. CheckForRekeys(ctx context.Context) <-chan error // TruncateLock attempts to take the history truncation lock for // this folder, for a TTL defined by the server. Returns true if // the lock was successfully taken. TruncateLock(ctx context.Context, id tlf.ID) (bool, error) // TruncateUnlock attempts to release the history truncation lock // for this folder. Returns true if the lock was successfully // released. TruncateUnlock(ctx context.Context, id tlf.ID) (bool, error) // DisableRekeyUpdatesForTesting disables processing rekey updates // received from the mdserver while testing. DisableRekeyUpdatesForTesting() // Shutdown is called to shutdown an MDServer connection. Shutdown() // IsConnected returns whether the MDServer is connected. IsConnected() bool // GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF, // which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it // entered into a conflicting state. For the highest level of confidence, the caller // should verify the mapping with a Merkle tree lookup. GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error) // OffsetFromServerTime is the current estimate for how off our // local clock is from the mdserver clock. Add this to any // mdserver-provided timestamps to get the "local" time of the // corresponding event. If the returned bool is false, then we // don't have a current estimate for the offset. OffsetFromServerTime() (time.Duration, bool) // GetKeyBundles looks up the key bundles for the given key // bundle IDs. tlfID must be non-zero but either or both wkbID // and rkbID can be zero, in which case nil will be returned // for the respective bundle.
// GetKeyBundles looks up the key bundles for the given key // bundle IDs. tlfID must be non-zero but either or both wkbID // and rkbID can be zero, in which case nil will be returned // for the respective bundle. If a bundle cannot be found, an // error is returned and nils are returned for both bundles. GetKeyBundles(ctx context.Context, tlfID tlf.ID, wkbID kbfsmd.TLFWriterKeyBundleID, rkbID kbfsmd.TLFReaderKeyBundleID) ( *kbfsmd.TLFWriterKeyBundleV3, *kbfsmd.TLFReaderKeyBundleV3, error) // CheckReachability is called when the Keybase service sends a notification // that network connectivity has changed. CheckReachability(ctx context.Context) // FastForwardBackoff fast forwards any existing backoff timer for // reconnects. If the MD server is connected at the time this is called, it's // essentially a no-op. FastForwardBackoff() } type mdServerLocal interface { MDServer addNewAssertionForTest( uid keybase1.UID, newAssertion keybase1.SocialAssertion) error getCurrentMergedHeadRevision(ctx context.Context, id tlf.ID) ( rev kbfsmd.Revision, err error) isShutdown() bool copy(config mdServerLocalConfig) mdServerLocal enableImplicitTeams() } // BlockServer gets and puts opaque data blocks. The instantiation // should be able to fetch session/user details via KBPKI. On a // put/delete, the server is responsible for: 1) checking that the ID // matches the hash of the buffer; and 2) enforcing writer quotas. type BlockServer interface { authTokenRefreshHandler // Get gets the (encrypted) block data associated with the given // block ID and context, uses the provided block key to decrypt // the block, and fills in the provided block object with its // contents, if the logged-in user has read permission for that // block. Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) ( []byte, kbfscrypto.BlockCryptKeyServerHalf, error) // Put stores the (encrypted) block data under the given ID // and context on the server, along with the server half of // the block key. context should contain a kbfsblock.RefNonce // of zero. There will be an initial reference for this block // for the given context. // // Put should be idempotent, although it should also return an // error if, for a given ID, any of the other arguments differ // from previous Put calls with the same ID. // // If this returns a kbfsblock.ServerErrorOverQuota, with // Throttled=false, the caller can treat it as informational // and otherwise ignore the error. Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error // PutAgain re-stores a previously deleted block under the same ID // with the same data. PutAgain(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
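//
// For example, a caller treating a non-throttled over-quota error from
// Put as informational might do the following (an illustrative sketch,
// not part of the interface; whether the error arrives as a value or a
// pointer, and the logger call, are assumptions):
//
//   err := bserver.Put(ctx, tlfID, id, bContext, buf, serverHalf)
//   if qe, ok := err.(kbfsblock.ServerErrorOverQuota); ok && !qe.Throttled {
//       log.CDebugf(ctx, "over quota (informational): %v", qe)
//       err = nil
//   }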
// AddBlockReference adds a new reference to the given block, // defined by the given context (which should contain a // non-zero kbfsblock.RefNonce). (Contexts with a // kbfsblock.RefNonce of zero should be used when putting the // block for the first time via Put().) Returns a // kbfsblock.ServerErrorBlockNonExistent if id is unknown within this // folder. // // AddBlockReference should be idempotent, although it should // also return an error if, for a given ID and refnonce, any // of the other fields of context differ from previous // AddBlockReference calls with the same ID and refnonce. // // If this returns a kbfsblock.ServerErrorOverQuota, with // Throttled=false, the caller can treat it as informational // and otherwise ignore the error. AddBlockReference(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) error // RemoveBlockReferences removes the references to the given block // ID defined by the given contexts. If no references to the block // remain after this call, the server is allowed to delete the // corresponding block permanently. If the reference defined by // the count has already been removed, the call is a no-op. // It returns the number of remaining not-yet-deleted references after this // reference has been removed. RemoveBlockReferences(ctx context.Context, tlfID tlf.ID, contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int, err error) // ArchiveBlockReferences marks the given block references as // "archived"; that is, they are not being used in the current // view of the folder, and shouldn't be served to anyone other // than folder writers. // // For a given ID/refnonce pair, ArchiveBlockReferences should // be idempotent, although it should also return an error if // any of the other fields of the context differ from previous // calls with the same ID/refnonce pair. ArchiveBlockReferences(ctx context.Context, tlfID tlf.ID, contexts kbfsblock.ContextMap) error // IsUnflushed returns whether a given block is being queued // locally for later flushing to another block server. If the // block is currently being flushed to the server, this should // return `true`, so that the caller will try to clean it up from // the server if it's no longer needed. IsUnflushed(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID) ( bool, error) // Shutdown is called to shut down a BlockServer connection. Shutdown(ctx context.Context) // GetUserQuotaInfo returns the quota for the logged-in user. GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.QuotaInfo, err error) // GetTeamQuotaInfo returns the quota for a team. GetTeamQuotaInfo(ctx context.Context, tid keybase1.TeamID) ( info *kbfsblock.QuotaInfo, err error) } // blockServerLocal is the interface for BlockServer implementations // that store data locally. type blockServerLocal interface { BlockServer // getAllRefsForTest returns all the known block references // for the given TLF, and should only be used during testing. getAllRefsForTest(ctx context.Context, tlfID tlf.ID) ( map[kbfsblock.ID]blockRefMap, error) } // BlockSplitter decides when a file or directory block needs to be split type BlockSplitter interface { // CopyUntilSplit copies data into the block until we reach the // point where we should split, but only if writing to the end of // the last block. If this is writing into the middle of a file, // just copy everything that will fit into the block, and assume // that block boundaries will be fixed later. Return how much was // copied. CopyUntilSplit( block *FileBlock, lastBlock bool, data []byte, off int64) int64 // CheckSplit, given a block, figures out whether it ends at the // right place. If so, return 0. If not, return either the // offset in the block where it should be split, or -1 if more // bytes from the next block should be appended. CheckSplit(block *FileBlock) int64 // MaxPtrsPerBlock describes the number of indirect pointers we // can fit into one indirect block. MaxPtrsPerBlock() int // ShouldEmbedBlockChanges decides whether we should keep the // block changes embedded in the MD or not. ShouldEmbedBlockChanges(bc *BlockChanges) bool }
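// Illustrative sketch (not part of the original file): one way a caller
// might drive CopyUntilSplit when appending at the end of a file. The
// helper below is hypothetical and much simpler than the real
// folder-update logic; it just hands back whatever didn't fit, for the
// caller to write into a freshly created successor block.
func appendToLastBlock(bsplit BlockSplitter, last *FileBlock, data []byte, off int64) (remainder []byte) {
	// Copy as much as the splitter's policy allows into the last block.
	copied := bsplit.CopyUntilSplit(last, true /* lastBlock */, data, off)
	// Anything left over belongs in a new block created by the caller.
	return data[copied:]
}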
// KeyServer fetches/writes server-side key halves from/to the key server. type KeyServer interface { // GetTLFCryptKeyServerHalf gets a server-side key half for a // device given the key half ID. GetTLFCryptKeyServerHalf(ctx context.Context, serverHalfID kbfscrypto.TLFCryptKeyServerHalfID, cryptPublicKey kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFCryptKeyServerHalf, error) // PutTLFCryptKeyServerHalves stores server-side key halves for a // set of users and devices. PutTLFCryptKeyServerHalves(ctx context.Context, keyServerHalves kbfsmd.UserDeviceKeyServerHalves) error // DeleteTLFCryptKeyServerHalf deletes a server-side key half for a // device given the key half ID. DeleteTLFCryptKeyServerHalf(ctx context.Context, uid keybase1.UID, key kbfscrypto.CryptPublicKey, serverHalfID kbfscrypto.TLFCryptKeyServerHalfID) error // Shutdown is called to free any KeyServer resources. Shutdown() } // NodeChange represents a change made to a node as part of an atomic // file system operation. type NodeChange struct { Node Node // Basenames of entries added/removed. DirUpdated []string FileUpdated []WriteRange } // Observer can be notified that there is an available update for a // given directory. The notification callbacks should not block, or // make any calls to the Notifier interface. Nodes passed to the // observer should not be held past the end of the notification // callback. type Observer interface { // LocalChange announces that the file at this Node has been // updated locally, but not yet saved at the server. LocalChange(ctx context.Context, node Node, write WriteRange) // BatchChanges announces that the nodes have all been updated // together atomically. Each NodeChange in changes affects the // same top-level folder and branch. BatchChanges(ctx context.Context, changes []NodeChange) // TlfHandleChange announces that the handle of the corresponding // folder branch has changed, likely due to previously-unresolved // assertions becoming resolved. This indicates that the listener // should switch over any cached paths for this folder-branch to // the new name. Nodes that were acquired under the old name will // still continue to work, but new lookups on the old name may // either encounter alias errors or entirely new TLFs (in the case // of conflicts). TlfHandleChange(ctx context.Context, newHandle *TlfHandle) } // Notifier notifies registrants of directory changes type Notifier interface { // RegisterForChanges declares that the given Observer wants to // subscribe to updates for the given top-level folders. RegisterForChanges(folderBranches []FolderBranch, obs Observer) error // UnregisterFromChanges declares that the given Observer no // longer wants to subscribe to updates for the given top-level // folders. UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error } // Clock is an interface for getting the current time type Clock interface { // Now returns the current time. Now() time.Time } // ConflictRenamer deals with names for conflicting directory entries. type ConflictRenamer interface { // ConflictRename returns the appropriately modified filename. ConflictRename(ctx context.Context, op op, original string) ( string, error) } // Tracer maybe adds traces to contexts. type Tracer interface { // MaybeStartTrace, if tracing is on, returns a new context // based on the given one with an attached trace made with the // given family and title. Otherwise, it returns the given // context unchanged.
MaybeStartTrace(ctx context.Context, family, title string) context.Context // MaybeFinishTrace finishes the trace attached to the given // context, if any. MaybeFinishTrace(ctx context.Context, err error) } type initModeGetter interface { // Mode indicates how KBFS is configured to run. Mode() InitMode // IsTestMode() indicates whether KBFS is running in a test. IsTestMode() bool } // Config collects all the singleton instance instantiations needed to // run KBFS in one place. The methods below are self-explanatory and // do not require comments. type Config interface { dataVersioner logMaker blockCacher blockServerGetter codecGetter cryptoPureGetter keyGetterGetter cryptoGetter signerGetter currentSessionGetterGetter diskBlockCacheGetter diskBlockCacheSetter clockGetter diskLimiterGetter syncedTlfGetterSetter initModeGetter Tracer KBFSOps() KBFSOps SetKBFSOps(KBFSOps) KBPKI() KBPKI SetKBPKI(KBPKI) KeyManager() KeyManager SetKeyManager(KeyManager) Reporter() Reporter SetReporter(Reporter) MDCache() MDCache SetMDCache(MDCache) KeyCache() KeyCache SetKeyBundleCache(kbfsmd.KeyBundleCache) KeyBundleCache() kbfsmd.KeyBundleCache SetKeyCache(KeyCache) SetBlockCache(BlockCache) DirtyBlockCache() DirtyBlockCache SetDirtyBlockCache(DirtyBlockCache) SetCrypto(Crypto) SetCodec(kbfscodec.Codec) MDOps() MDOps SetMDOps(MDOps) KeyOps() KeyOps SetKeyOps(KeyOps) BlockOps() BlockOps SetBlockOps(BlockOps) MDServer() MDServer SetMDServer(MDServer) SetBlockServer(BlockServer) KeyServer() KeyServer SetKeyServer(KeyServer) KeybaseService() KeybaseService SetKeybaseService(KeybaseService) BlockSplitter() BlockSplitter SetBlockSplitter(BlockSplitter) Notifier() Notifier SetNotifier(Notifier) SetClock(Clock) ConflictRenamer() ConflictRenamer SetConflictRenamer(ConflictRenamer) MetadataVersion() kbfsmd.MetadataVer SetMetadataVersion(kbfsmd.MetadataVer) DefaultBlockType() keybase1.BlockType SetDefaultBlockType(blockType keybase1.BlockType) RekeyQueue() RekeyQueue SetRekeyQueue(RekeyQueue) // ReqsBufSize indicates the number of read or write operations // that can be buffered per folder ReqsBufSize() int // MaxNameBytes indicates the maximum supported size of a // directory entry name in bytes. MaxNameBytes() uint32 // MaxDirBytes indicates the maximum supported plaintext size of a // directory in bytes. MaxDirBytes() uint64 // DoBackgroundFlushes says whether we should periodically try to // flush dirty files, even without a sync from the user. Should // be true except during some testing. DoBackgroundFlushes() bool SetDoBackgroundFlushes(bool) // RekeyWithPromptWaitTime indicates how long to wait, after // setting the rekey bit, before prompting for a paper key. RekeyWithPromptWaitTime() time.Duration SetRekeyWithPromptWaitTime(time.Duration) // PrefetchStatus returns the prefetch status of a block. PrefetchStatus(context.Context, tlf.ID, BlockPointer) PrefetchStatus // GracePeriod specifies a grace period for which a delayed cancellation // waits before actually canceling the context. This is useful for giving a // critical portion of a slow remote operation some extra time to finish, as // an effort to avoid conflicts. For example, an O_EXCL Create call // interrupted by an ALRM signal may actually make it to the server, while // the application assumes it didn't since EINTR is returned. A delayed // cancellation allows us to distinguish between a successful cancel (where // the remote operation didn't make it to the server) and a failed cancel // (where the remote operation made it to the server).
However, the optimal value of this depends on the network // conditions. A long grace period under really good network // conditions would just unnecessarily slow down Ctrl-C. // // TODO: make this adaptive and self-change over time based on network // conditions. DelayedCancellationGracePeriod() time.Duration SetDelayedCancellationGracePeriod(time.Duration) // QuotaReclamationPeriod indicates how often each TLF // should check for quota to reclaim. If the Duration.Seconds() // == 0, quota reclamation should not run automatically. QuotaReclamationPeriod() time.Duration // QuotaReclamationMinUnrefAge indicates the minimum time a block // must have been unreferenced before it can be reclaimed. QuotaReclamationMinUnrefAge() time.Duration // QuotaReclamationMinHeadAge indicates the minimum age of the // most recently merged MD update before we can run reclamation, // to avoid conflicting with a currently active writer. QuotaReclamationMinHeadAge() time.Duration // ResetCaches clears and re-initializes all data and key caches. ResetCaches() // StorageRoot returns the path to the storage root for this config. StorageRoot() string // MetricsRegistry may be nil, which should be interpreted as // not using metrics at all (i.e., as if UseNilMetrics were // set). This differs from how go-metrics treats nil Registry // objects, which is to use the default registry. MetricsRegistry() metrics.Registry SetMetricsRegistry(metrics.Registry) // SetTraceOptions sets the options for tracing (via x/net/trace). SetTraceOptions(enabled bool) // TLFValidDuration is the time TLFs are valid before identification needs to be redone. TLFValidDuration() time.Duration // SetTLFValidDuration sets TLFValidDuration. SetTLFValidDuration(time.Duration) // BGFlushDirOpBatchSize returns the directory op batch size for // background flushes. BGFlushDirOpBatchSize() int // SetBGFlushDirOpBatchSize sets the directory op batch size for // background flushes. SetBGFlushDirOpBatchSize(s int) // BGFlushPeriod returns how long to wait for a batch to fill up // before syncing a set of changes to the servers. BGFlushPeriod() time.Duration // SetBGFlushPeriod sets how long to wait for a batch to fill up // before syncing a set of changes to the servers. SetBGFlushPeriod(p time.Duration) // Shutdown is called to free config resources. Shutdown(context.Context) error // CheckStateOnShutdown tells the caller whether or not it is safe // to check the state of the system on shutdown. CheckStateOnShutdown() bool // GetRekeyFSMLimiter returns the global rekey FSM limiter. GetRekeyFSMLimiter() *OngoingWorkLimiter // RootNodeWrappers returns the set of root node wrapper functions // that will be applied to each newly-created root node. RootNodeWrappers() []func(Node) Node // AddRootNodeWrapper adds a new wrapper function that will be // applied whenever a root Node is created. This will only apply // to TLFs that are first accessed after `AddRootNodeWrapper` is // called. AddRootNodeWrapper(func(Node) Node) }
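// Illustrative sketch (not part of the original file): how a caller might
// honor the delayed-cancellation grace period described above, giving the
// critical section a chance to finish before acting on a cancellation.
// The opDone channel is hypothetical; it would be closed when the remote
// operation completes.
func waitWithGracePeriod(ctx context.Context, config Config, opDone <-chan struct{}) error {
	select {
	case <-opDone:
		return nil
	case <-ctx.Done():
		// Cancelled: wait out the grace period so we can tell a
		// successful cancel from a failed one.
		select {
		case <-opDone:
			return nil
		case <-time.After(config.DelayedCancellationGracePeriod()):
			return ctx.Err()
		}
	}
}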
// NodeCache holds Nodes, and allows libkbfs to update them when // things change about the underlying KBFS blocks. It is probably // most useful to instantiate this on a per-folder-branch basis, so // that it can create a Path with the correct DirId and Branch name. type NodeCache interface { // GetOrCreate either makes a new Node for the given // BlockPointer, or returns an existing one. TODO: If we ever // support hard links, we will have to revisit the "name" and // "parent" parameters here. name must not be empty. Returns // an error if parent cannot be found. GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error) // Get returns the Node associated with the given ptr if one // already exists. Otherwise, it returns nil. Get(ref BlockRef) Node // UpdatePointer updates the BlockPointer for the corresponding // Node. NodeCache ignores this call when oldRef is not cached in // any Node. Returns whether the pointer was updated. UpdatePointer(oldRef BlockRef, newPtr BlockPointer) bool // Move swaps the parent node for the corresponding Node, and // updates the node's name. NodeCache ignores the call when ptr // is not cached. If newParent is nil, it treats the ptr's // corresponding node as being unlinked from the old parent // completely. If successful, it returns a function that can be // called to undo the effect of the move (or `nil` if nothing // needs to be done); if newParent cannot be found, it returns an // error and a `nil` undo function. Move(ref BlockRef, newParent Node, newName string) ( undoFn func(), err error) // Unlink sets the corresponding node's parent to nil and caches // the provided path in case the node is still open. NodeCache // ignores the call when ptr is not cached. The path is required // because the caller may have made changes to the parent nodes // already that shouldn't be reflected in the cached path. It // returns a function that can be called to undo the effect of the // unlink (or `nil` if nothing needs to be done). Unlink(ref BlockRef, oldPath path, oldDe DirEntry) (undoFn func()) // IsUnlinked returns whether `Unlink` has been called for the // reference behind this node. IsUnlinked(node Node) bool // UnlinkedDirEntry returns a pointer to a modifiable directory // entry if `Unlink` has been called for the reference behind this // node. UnlinkedDirEntry(node Node) DirEntry // PathFromNode creates the path up to a given Node. PathFromNode(node Node) path // AllNodes returns the complete set of nodes currently in the cache. AllNodes() []Node // AddRootWrapper adds a new wrapper function that will be applied // whenever a root Node is created. AddRootWrapper(func(Node) Node) }
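// Illustrative sketch (not part of the original file): the undo-function
// pattern returned by NodeCache.Move and NodeCache.Unlink above. A caller
// applies a speculative cache change and rolls it back if a later step
// fails; the rest-of-the-operation callback here is hypothetical.
func moveWithRollback(nc NodeCache, ref BlockRef, newParent Node, newName string, rest func() error) error {
	undo, err := nc.Move(ref, newParent, newName)
	if err != nil {
		return err
	}
	if err := rest(); err != nil {
		if undo != nil {
			undo() // restore the old parent/name in the cache
		}
		return err
	}
	return nil
}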
// fileBlockDeepCopier fetches a file block, makes a deep copy of it // (duplicating pointer for any indirect blocks) and generates a new // random temporary block ID for it. It returns the new BlockPointer, // and internally saves the block for future uses. type fileBlockDeepCopier func(context.Context, string, BlockPointer) ( BlockPointer, error) // crAction represents a specific action to take as part of the // conflict resolution process. type crAction interface { // swapUnmergedBlock should be called before do(), and if it // returns true, the caller must use the merged block // corresponding to the returned BlockPointer instead of // unmergedBlock when calling do(). If BlockPointer{} is zeroPtr // (and true is returned), just swap in the regular mergedBlock. swapUnmergedBlock(unmergedChains *crChains, mergedChains *crChains, unmergedBlock *DirBlock) (bool, BlockPointer, error) // do modifies the given merged block in place to resolve the // conflict, and potentially uses the provided blockCopyFetchers to // obtain copies of other blocks (along with new BlockPointers) // when requiring a block copy. do(ctx context.Context, unmergedCopier fileBlockDeepCopier, mergedCopier fileBlockDeepCopier, unmergedBlock *DirBlock, mergedBlock *DirBlock) error // updateOps potentially modifies, in place, the slices of // unmerged and merged operations stored in the corresponding // crChains for the given unmerged and merged most recent // pointers. Eventually, the "unmerged" ops will be pushed as // part of an MD update, and so should contain any necessary // operations to fully merge the unmerged data, including any // conflict resolution. The "merged" ops will be played through // locally, to notify any caches about the newly-obtained merged // data (and any changes to local data that were required as part // of conflict resolution, such as renames). A few things to note: // * A particular action's updateOps method may be called more than // once for different sets of chains; however, it should only add // new directory operations (like create/rm/rename) into directory // chains. // * updateOps doesn't necessarily result in correct BlockPointers within // each of those ops; that must happen in a later phase. // * mergedBlock can be nil if the chain is for a file. updateOps(unmergedMostRecent BlockPointer, mergedMostRecent BlockPointer, unmergedBlock *DirBlock, mergedBlock *DirBlock, unmergedChains *crChains, mergedChains *crChains) error // String returns a string representation for this crAction, used // for debugging. String() string } // RekeyQueue is a managed queue of folders needing some rekey action taken // upon them by the current client. type RekeyQueue interface { // Enqueue enqueues a folder for rekey action. If the TLF is already in the // rekey queue, the error channel of the existing one is returned. Enqueue(tlf.ID) // IsRekeyPending returns true if the given folder is in the rekey queue. // Note that an ongoing rekey doesn't count as "pending". IsRekeyPending(tlf.ID) bool // Shutdown cancels all pending rekey actions and clears the queue. It // doesn't cancel ongoing rekeys. After Shutdown() is called, the same // RekeyQueue shouldn't be used anymore. Shutdown() } // RekeyFSM is a Finite State Machine (FSM) for housekeeping rekey states for a // FolderBranch. Each FolderBranch has its own FSM for rekeys. // // See rekey_fsm.go for implementation details. // // TODO: report FSM status in FolderBranchStatus? type RekeyFSM interface { // Event sends an event to the FSM. Event(event RekeyEvent) // Shutdown shuts down the FSM. No new event should be sent into the FSM // after this method is called. Shutdown() // listenOnEvent adds a listener (callback) to the FSM so that when // event happens, callback is called with the received event. If repeatedly // is set to false, callback is called only once. Otherwise it's called every // time event happens. // // Currently this is only used in tests and for RekeyFile. See comment for // RequestRekeyAndWaitForOneFinishEvent for more details. listenOnEvent( event rekeyEventType, callback func(RekeyEvent), repeatedly bool) } // BlockRetriever specifies how to retrieve blocks. type BlockRetriever interface { // Request retrieves blocks asynchronously. Request(ctx context.Context, priority int, kmd KeyMetadata, ptr BlockPointer, block Block, lifetime BlockCacheLifetime) <-chan error
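//
// For example, a synchronous caller can block on the returned channel
// (an illustrative sketch, not part of the interface; the priority and
// lifetime constants shown are assumptions):
//
//   errCh := retriever.Request(ctx, defaultOnDemandRequestPriority, kmd,
//       ptr, block, TransientEntry)
//   select {
//   case err := <-errCh:
//       return err
//   case <-ctx.Done():
//       return ctx.Err()
//   }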
// RequestNoPrefetch retrieves blocks asynchronously, but doesn't trigger a // prefetch unless the block had to be retrieved from the server. RequestNoPrefetch(ctx context.Context, priority int, kmd KeyMetadata, ptr BlockPointer, block Block, lifetime BlockCacheLifetime) <-chan error // PutInCaches puts the block into the in-memory cache, and ensures that // the disk cache metadata is updated. PutInCaches(ctx context.Context, ptr BlockPointer, tlfID tlf.ID, block Block, lifetime BlockCacheLifetime, prefetchStatus PrefetchStatus) error // TogglePrefetcher creates a new prefetcher. TogglePrefetcher(enable bool, syncCh <-chan struct{}) <-chan struct{} }
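// Illustrative sketch (not part of the original file): a minimal Observer
// implementation, e.g. for tests, that just counts the notifications
// defined by the Observer interface above. Note the callbacks must not
// block or call back into the Notifier.
type countingObserver struct {
	localChanges, batchChanges, handleChanges int
}

func (o *countingObserver) LocalChange(ctx context.Context, node Node, write WriteRange) {
	o.localChanges++
}

func (o *countingObserver) BatchChanges(ctx context.Context, changes []NodeChange) {
	o.batchChanges++
}

func (o *countingObserver) TlfHandleChange(ctx context.Context, newHandle *TlfHandle) {
	o.handleChanges++
}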
1
19,036
What happens if nodes throughout a hierarchy are modified (as they would be)? It looks like we're going to `Reset` once for each `NodeID` affected.
keybase-kbfs
go
@@ -1065,7 +1065,6 @@ error: * process, mode_switch_buf_sz is maximum size for switch code, and * mode_switch_data is the address where the app stack pointer is stored. */ - static size_t generate_switch_mode_jmp_to_hook(HANDLE phandle, byte *local_code_buf, byte *mode_switch_buf, byte *hook_location,
1
/* ********************************************************** * Copyright (c) 2011-2021 Google, Inc. All rights reserved. * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2000-2001 Hewlett-Packard Company */ /* * inject.c - injects dynamo into a new thread */ /* FIXME: Unicode support?!?! case 61 */ #include "../globals.h" /* for pragma warning's and assert defines */ #include "../module_shared.h" /* for d_r_get_proc_address() */ #define WIN32_LEAN_AND_MEAN #include <windows.h> #include <stdio.h> #include "ntdll.h" /* for get/set context etc. */ #include "instr.h" #include "instr_create_shared.h" #include "decode.h" /* i#1597: to prevent an IAT hooking injected library in drrun or a tool * front-end from redirecting kernel32!LoadLibrary and kernel32!GetProcAddress * to the inject lib itself, which won't be there in the child, it's best * to use DR's d_r_get_proc_address(). We're already linking w/ the files we need. 
*/ #include "os_private.h" /* for d_r_get_proc_address() and load_dynamo */ #define GET_PROC_ADDR d_r_get_proc_address /* this entry point is hardcoded, FIXME : abstract */ #define DYNAMORIO_ENTRY "dynamo_auto_start" #ifdef DEBUG /* for asserts, we import globals.h now (for pragmas) so don't need to * duplicate assert defines, declarations */ extern void display_error(char *msg); #else # define display_error(msg) ((void)0) #endif /* get_module_handle is unsafe to call at arbitrary point from the core so move * all uses in inject.c to separate init function which can be called at a safe * point */ static ptr_uint_t addr_getprocaddr; static ptr_uint_t addr_loadlibrarya; #ifdef LOAD_DYNAMO_DEBUGBREAK static ptr_uint_t addr_debugbreak; #endif static bool inject_initialized = false; void inject_init() { HANDLE kern32 = get_module_handle(L"KERNEL32.DLL"); ASSERT(kern32 != NULL); addr_getprocaddr = (ptr_uint_t)GET_PROC_ADDR(kern32, "GetProcAddress"); ASSERT(addr_getprocaddr != 0); addr_loadlibrarya = (ptr_uint_t)GET_PROC_ADDR(kern32, "LoadLibraryA"); ASSERT(addr_loadlibrarya != 0); #ifdef LOAD_DYNAMO_DEBUGBREAK addr_debugbreak = (ptr_uint_t)GET_PROC_ADDR(kern32, "DebugBreak"); ASSERT(addr_debugbreak != NULL); #endif inject_initialized = true; } /* change this if load_dynamo changes * 128 is more than enough room even with all debugging code in there */ #define SIZE_OF_LOAD_DYNAMO 128 /* pass non-NULL for thandle if you want this routine to use * Get/SetThreadContext to get the context -- you must still pass * in a pointer to a cxt */ bool inject_into_thread(HANDLE phandle, CONTEXT *cxt, HANDLE thandle, char *dynamo_path) { size_t nbytes; bool success = false; ptr_uint_t dynamo_entry_esp; ptr_uint_t dynamo_path_esp; LPVOID load_dynamo_code = NULL; /* = base of code allocation */ ptr_uint_t addr; reg_t *bufptr; char buf[MAX_PATH * 3]; uint old_prot; ASSERT(cxt != NULL); #ifndef NOT_DYNAMORIO_CORE_PROPER /* FIXME - if we were early injected we couldn't call inject_init during * startup because kernel32 wasn't loaded yet, so we call it here which * isn't safe because it uses app locks. If we want to support a mix * of early and late follow children injection we should change load_dynamo * to use Nt functions (which we can link) rather then kernel32 functions * (which we have to look up). We could also use module.c code to safely * walk the exports of kernel32.dll (we can cache its mod handle when it * is loaded). 
*/ if (!inject_initialized) { SYSLOG_INTERNAL_WARNING("Using late inject follow children from early injected " "process, unsafe LdrLock usage"); SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); inject_init(); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } #else ASSERT(inject_initialized); #endif /* soon we'll start using alternative injection with case 102 - leaving block */ { reg_t app_xsp; if (thandle != NULL) { /* grab the context of the app's main thread */ /* we can't use proc_has_feature() so no CONTEXT_DR_STATE */ cxt->ContextFlags = CONTEXT_DR_STATE_ALLPROC; if (!NT_SUCCESS(nt_get_context(thandle, cxt))) { display_error("GetThreadContext failed"); goto error; } } app_xsp = cxt->CXT_XSP; /* copy load_dynamo() into the address space of the new process */ ASSERT(BUFFER_SIZE_BYTES(buf) > SIZE_OF_LOAD_DYNAMO); memcpy(buf, (char *)load_dynamo, SIZE_OF_LOAD_DYNAMO); /* R-X protection is adequate for our non-self modifying code, * and we'll update that after we're done with * nt_write_virtual_memory() calls */ /* get allocation, this will be freed by os_heap_free, so make sure * it is a compatible allocation method */ if (!NT_SUCCESS(nt_remote_allocate_virtual_memory( phandle, &load_dynamo_code, SIZE_OF_LOAD_DYNAMO, PAGE_EXECUTE_READWRITE, MEMORY_COMMIT))) { display_error("Failed to allocate memory for injection code"); goto error; } if (!nt_write_virtual_memory(phandle, load_dynamo_code, buf, SIZE_OF_LOAD_DYNAMO, NULL)) { display_error("WriteMemory failed"); goto error; } /* Xref PR 252745 & PR 252008 - we can use the app's stack to hold our data * even on WOW64 and 64-bit since we're using set context to set xsp. */ /* copy the DYNAMORIO_ENTRY string to the app's stack */ _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s", DYNAMORIO_ENTRY); NULL_TERMINATE_BUFFER(buf); nbytes = strlen(buf) + 1; // include the trailing '\0' /* keep esp at pointer-sized alignment */ cxt->CXT_XSP -= ALIGN_FORWARD(nbytes, XSP_SZ); dynamo_entry_esp = cxt->CXT_XSP; if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, buf, nbytes, NULL)) { display_error("WriteMemory failed"); goto error; } /* copy the dynamorio_path string to the app's stack */ _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s", dynamo_path); NULL_TERMINATE_BUFFER(buf); nbytes = strlen(buf) + 1; // include the trailing '\0' /* keep esp at pointer-sized alignment */ cxt->CXT_XSP -= ALIGN_FORWARD(nbytes, XSP_SZ); dynamo_path_esp = cxt->CXT_XSP; if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, buf, nbytes, NULL)) { display_error("WriteMemory failed"); goto error; } /* copy the current context to the app's stack. Only need the * control registers, so we use a priv_mcontext_t layout. */ ASSERT(BUFFER_SIZE_BYTES(buf) >= sizeof(priv_mcontext_t)); bufptr = (reg_t *)buf; *bufptr++ = cxt->CXT_XDI; *bufptr++ = cxt->CXT_XSI; *bufptr++ = cxt->CXT_XBP; *bufptr++ = app_xsp; *bufptr++ = cxt->CXT_XBX; *bufptr++ = cxt->CXT_XDX; *bufptr++ = cxt->CXT_XCX; *bufptr++ = cxt->CXT_XAX; #ifdef X64 *bufptr++ = cxt->R8; *bufptr++ = cxt->R9; *bufptr++ = cxt->R10; *bufptr++ = cxt->R11; *bufptr++ = cxt->R12; *bufptr++ = cxt->R13; *bufptr++ = cxt->R14; *bufptr++ = cxt->R15; #endif *bufptr++ = cxt->CXT_XFLAGS; *bufptr++ = cxt->CXT_XIP; bufptr += PRE_XMM_PADDING / sizeof(*bufptr); /* It would be nice to use preserve_xmm_caller_saved(), but we'd need to * link proc.c and deal w/ messy dependencies to get it into arch_exports.h, * so we do our own check. We go ahead and put in the xmm slots even * if the underlying processor has no xmm support: no harm done.
*/ if (IF_X64_ELSE(true, is_wow64_process(NT_CURRENT_PROCESS))) { /* PR 264138: preserve xmm0-5. We fill in all slots even though * for 32-bit we don't use them (PR 306394). */ int i, j; /* For x86, ensure we have ExtendedRegisters space (i#1223) */ IF_NOT_X64(ASSERT(TEST(CONTEXT_XMM_FLAG, cxt->ContextFlags))); /* XXX i#1312: This should be proc_num_simd_sse_avx_registers(). */ ASSERT(MCXT_SIMD_SLOT_SIZE == ZMM_REG_SIZE); for (i = 0; i < MCXT_NUM_SIMD_SLOTS; i++) { for (j = 0; j < XMM_REG_SIZE / sizeof(*bufptr); j++) { *bufptr++ = CXT_XMM(cxt, i)->reg[j]; } /* FIXME i#437: save ymm fields. For now we assume we're * not saving and we just skip the upper 128 bits. */ bufptr += (ZMM_REG_SIZE - XMM_REG_SIZE) / sizeof(*bufptr); } } else { /* skip xmm slots */ bufptr += MCXT_TOTAL_SIMD_SLOTS_SIZE / sizeof(*bufptr); } /* TODO i#1312: the zmm and mask fields need to be copied. */ bufptr += MCXT_TOTAL_OPMASK_SLOTS_SIZE / sizeof(*bufptr); ASSERT((char *)bufptr - (char *)buf == sizeof(priv_mcontext_t)); *bufptr++ = (ptr_uint_t)load_dynamo_code; *bufptr++ = SIZE_OF_LOAD_DYNAMO; nbytes = sizeof(priv_mcontext_t) + 2 * sizeof(reg_t); cxt->CXT_XSP -= nbytes; #ifdef X64 /* We need xsp to be aligned prior to each call, but we can only pad * before the context as all later users assume the info they need is * at TOS. */ cxt->CXT_XSP = ALIGN_BACKWARD(cxt->CXT_XSP, 16); #endif if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, buf, nbytes, NULL)) { display_error("WriteMemory failed"); goto error; } /* push the address of the DYNAMORIO_ENTRY string on the app's stack */ cxt->CXT_XSP -= XSP_SZ; if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, &dynamo_entry_esp, sizeof(dynamo_entry_esp), &nbytes)) { display_error("WriteMemory failed"); goto error; } /* push the address of GetProcAddress on the app's stack */ ASSERT(addr_getprocaddr); addr = addr_getprocaddr; cxt->CXT_XSP -= XSP_SZ; if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, &addr, sizeof(addr), NULL)) { display_error("WriteMemory failed"); goto error; } /* push the address of the dynamorio_path string on the app's stack */ cxt->CXT_XSP -= XSP_SZ; if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, &dynamo_path_esp, sizeof(dynamo_path_esp), &nbytes)) { display_error("WriteMemory failed"); goto error; } /* push the address of LoadLibraryA on the app's stack */ ASSERT(addr_loadlibrarya); addr = addr_loadlibrarya; cxt->CXT_XSP -= XSP_SZ; if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, &addr, sizeof(addr), NULL)) { display_error("WriteMemory failed"); goto error; } #ifdef LOAD_DYNAMO_DEBUGBREAK /* push the address of DebugBreak on the app's stack */ ASSERT(addr_debugbreak); addr = addr_debugbreak; cxt->CXT_XSP -= XSP_SZ; if (!nt_write_virtual_memory(phandle, (LPVOID)cxt->CXT_XSP, &addr, sizeof(addr), NULL)) { display_error("WriteMemory failed"); goto error; } #endif /* make the code R-X now */ if (!nt_remote_protect_virtual_memory(phandle, load_dynamo_code, SIZE_OF_LOAD_DYNAMO, PAGE_EXECUTE_READ, &old_prot)) { display_error("Failed to make injection code R-X"); goto error; } ASSERT(old_prot == PAGE_EXECUTE_READWRITE); /* now change Eip to point to the entry point of load_dynamo(), so that when we resume, load_dynamo is invoked automatically */ cxt->CXT_XIP = (ptr_uint_t)load_dynamo_code; cxt->CXT_XFLAGS = 0; if (thandle != NULL) { if (!NT_SUCCESS(nt_set_context(thandle, cxt))) { display_error("SetThreadContext failed"); goto error; } } success = true; } error: /* we do not recover any changes in the child's 
address space */ return success; } /* FIXME - would be nicer to use instrlist etc. to generate and emit the code * (with patch list for the calls), but we'll also likely want to use this for * drinject which would mean getting most of the core compiled into that. Prob. * should still do it, but writing like this isn't that hard. Another * possibility is to export this from a special/standalone build of dr that * injector can load, that would also make it easier for injector to find * Ldr* addresses. At the very least we should combine all these enums (instr.h * os_shared.h, emit_utils.c etc.) in one place. * * UPDATE: with drdecode (i#617) for use in drinject, we can use DR's * IR and should for any future code. */ enum { PUSHF = 0x9c, POPF = 0x9d, PUSHA = 0x60, POPA = 0x61, PUSH_EAX = 0x50, POP_EAX = 0x58, PUSH_ECX = 0x51, POP_ECX = 0x59, PUSH_IMM32 = 0x68, PUSH_IMM8 = 0x6a, JMP_REL8 = 0xeb, JMP_REL32 = 0xe9, CALL_REL32 = 0xe8, CALL_RM32 = 0xff, CALL_EAX_RM = 0xd0, JMP_FAR_DIRECT = 0xea, MOV_RM32_2_REG32 = 0x8b, MOV_REG32_2_RM32 = 0x89, MOV_ESP_2_EAX_RM = 0xc4, MOV_EAX_2_ECX_RM = 0xc8, MOV_EAX_2_EDX_RM = 0xd0, MOV_EAX_2_EAX_RM = 0xc0, MOV_derefEAX_2_EAX_RM = 0x00, MOV_deref_disp8_EAX_2_EAX_RM = 0x40, MOV_IMM8_2_RM8 = 0xc6, MOV_IMM32_2_RM32 = 0xc7, MOV_IMM_RM_ABS = 0x05, MOV_IMM_XAX = 0xb8, ADD_EAX_IMM32 = 0x05, AND_RM32_IMM32 = 0x81, CMP_EAX_IMM32 = 0x3d, JZ_REL8 = 0x74, JNZ_REL8 = 0x75, REX_W = 0x48, REX_B = 0x41, REX_R = 0x44, }; #define DEBUG_LOOP 0 #define ASSERT_ROOM(cur, buf, maxlen) ASSERT(cur + maxlen < buf + sizeof(buf)) #define RAW_INSERT_INT16(pos, value) \ do { \ ASSERT(CHECK_TRUNCATE_TYPE_short((ptr_int_t)(value))); \ *(short *)(pos) = (short)(value); \ (pos) += sizeof(short); \ } while (0) #define RAW_INSERT_INT32(pos, value) \ do { \ ASSERT(CHECK_TRUNCATE_TYPE_int((ptr_int_t)(value))); \ *(int *)(pos) = (int)(ptr_int_t)(value); \ (pos) += sizeof(int); \ } while (0) #define RAW_INSERT_INT64(pos, value) \ do { \ *(int64 *)(pos) = (int64)(value); \ (pos) += sizeof(int64); \ } while (0) #define RAW_INSERT_INT8(pos, value) \ do { \ ASSERT(CHECK_TRUNCATE_TYPE_sbyte((int)value)); \ *(char *)(pos) = (char)(value); \ (pos) += sizeof(char); \ } while (0) #define RAW_PUSH_INT64(pos, value) \ do { \ *(pos)++ = PUSH_IMM32; \ RAW_INSERT_INT32(pos, (int)value); \ /* Push is sign-extended, so we can skip top half if top 33 bits are 0. */ \ if ((uint64)(value) >= 0x80000000UL) { \ *(pos)++ = MOV_IMM32_2_RM32; \ *(pos)++ = 0x44; \ *(pos)++ = 0x24; \ *(pos)++ = 0x04; /* xsp+4 */ \ RAW_INSERT_INT32(pos, (value) >> 32); \ } \ } while (0) #define RAW_PUSH_INT32(pos, value) \ do { \ *(pos)++ = PUSH_IMM32; \ RAW_INSERT_INT32(pos, value); \ } while (0) /* i#142, i#923: 64-bit support now works regardless of where the hook * location and the allocated remote_code_buffer are. * * XXX: this is all really messy: these macros are too limited for * inserting general instructions, so for x64 I hacked it by leaving * in the pushes and copying from TOS into the register params. * I would prefer to throw all this out and replace w/ IR or asm, * which would be easy now that we have drinjectlib. * Although for cross-arch injection (i#803) we want code for both * bitwidths, which actually might be easier w/ the macros for 32-to-64. */ /* If reachable is non-0, ensures the resulting allocation is * 32-bit-disp-reachable from [reachable, reachable+PAGE_SIZE). * For injecting into 64-bit from 32-bit, uses only low addresses. 
*/ static byte * allocate_remote_code_buffer(HANDLE phandle, size_t size, byte *reachable) { NTSTATUS res; byte *buf = (byte *)NULL; #ifdef X64 /* Start at bottom of reachability range and keep trying at higher addresses */ byte *pc = (byte *)ALIGN_FORWARD( REACHABLE_32BIT_START((byte *)reachable, (byte *)reachable + PAGE_SIZE), OS_ALLOC_GRANULARITY); byte *end_pc = (byte *)REACHABLE_32BIT_END((byte *)reachable, (byte *)reachable + PAGE_SIZE); /* we can't just pick an address and see if it gets allocated * b/c it could be in the middle of an existing reservation * (stack, e.g.) and then when we free it we could free the entire * reservation (yes this actually happened: i#753) * Update: we now reserve+commit so this won't happen, but it means * we need to be at an os alloc boundary (64K). */ MEMORY_BASIC_INFORMATION mbi; size_t got; do { /* We do now have remote_query_virtual_memory_maybe64() available, but we * do not yet have allocation (win8+ only) or free (would have to make * one via switch_modes_and_call()) routines, and using low addresses should * always work. We thus stick with 32-bit pointers here even for 64-bit * child processes. */ res = nt_remote_query_virtual_memory(phandle, pc, &mbi, sizeof(mbi), &got); if (got != sizeof(mbi)) { /* bail and hope a low address works, which it will pre-win8 */ break; } if (NT_SUCCESS(res) && mbi.State == MEM_FREE && mbi.RegionSize >= size && /* we're reserving+committing so we need to be at an alloc boundary */ ALIGNED(pc, OS_ALLOC_GRANULARITY) && pc != NULL) { buf = pc; /* we do NOT want mbi.AllocationBase as it may not be reachable */ break; } pc += mbi.RegionSize; } while (NT_SUCCESS(res) && pc + size < end_pc); #endif /* On Win8, a remote MEM_COMMIT in the dll address region fails with * STATUS_CONFLICTING_ADDRESSES. Yet a local commit works, and a remote * reserve+commit works. Go figure. */ /* See above: we use only low addresses. To support high we'd need to add * allocate and free routines via switch_modes_and_call() (we can use * NtWow64AllocateVirtualMemory64 on win8+). */ res = nt_remote_allocate_virtual_memory(phandle, &buf, size, PAGE_EXECUTE_READWRITE, MEM_RESERVE); if (NT_SUCCESS(res)) { res = nt_remote_allocate_virtual_memory(phandle, &buf, size, PAGE_EXECUTE_READWRITE, MEM_COMMIT); } /* We know buf at low end reaches, but might have gone too high. */ if (!NT_SUCCESS(res) || (reachable != 0 && !REL32_REACHABLE(buf + size, (byte *)reachable))) { #ifndef NOT_DYNAMORIO_CORE_PROPER SYSLOG_INTERNAL_ERROR("failed to allocate child memory for injection"); #endif return NULL; } return buf; } static bool free_remote_code_buffer(HANDLE phandle, byte *base) { /* There seems to be no such thing as NtWow64FreeVirtualMemory64! * allocate_remote_code_buffer() is using low address though, so we're good * to use 32-bit pointers even for 64-bit children. */ NTSTATUS res = nt_remote_free_virtual_memory(phandle, base); return NT_SUCCESS(res); } /* Does not support a 64-bit child of a 32-bit DR. 
*/ static void * inject_gencode_at_ldr(HANDLE phandle, char *dynamo_path, uint inject_location, void *inject_address, void *hook_location, byte hook_buf[EARLY_INJECT_HOOK_SIZE], void *must_reach) { void *hook_target; byte *remote_code_buffer = NULL, *remote_data_buffer; /* max usage for local_buf is for writing the dr library name * 2*MAX_PATH (unicode) + sizeof(UNICODE_STRING) + 2, round up to * 3*MAX_PATH to be safe */ byte local_buf[3 * MAX_PATH]; byte *cur_local_pos, *cur_remote_pos, *jmp_fixup1, *jmp_fixup2; char *takeover_func = "dynamorio_app_init_and_early_takeover"; PUNICODE_STRING mod, mod_remote; PANSI_STRING func, func_remote; int res, i; size_t num_bytes_in, num_bytes_out; uint old_prot; GET_NTDLL(LdrLoadDll, (IN PCWSTR PathToFile OPTIONAL, IN PULONG Flags OPTIONAL, IN PUNICODE_STRING ModuleFileName, OUT PHANDLE ModuleHandle)); GET_NTDLL(LdrGetProcedureAddress, (IN HANDLE ModuleHandle, IN PANSI_STRING ProcedureName OPTIONAL, IN ULONG Ordinal OPTIONAL, OUT FARPROC * ProcedureAddress)); #define GET_PROC_ADDR_BAD_ADDR 0xffbadd11 GET_NTDLL(NtProtectVirtualMemory, (IN HANDLE ProcessHandle, IN OUT PVOID * BaseAddress, IN OUT PULONG ProtectSize, IN ULONG NewProtect, OUT PULONG OldProtect)); GET_NTDLL(NtContinue, (IN PCONTEXT Context, IN BOOLEAN TestAlert)); /* get buffer for emitted code and data */ remote_code_buffer = allocate_remote_code_buffer(phandle, 2 * PAGE_SIZE, must_reach); if (remote_code_buffer == NULL) goto error; remote_data_buffer = remote_code_buffer + PAGE_SIZE; /* write data */ /* FIXME the two writes are similar (unicode vs ascii), could combine */ /* First UNICODE_STRING to library */ cur_remote_pos = remote_data_buffer; cur_local_pos = local_buf; ASSERT_ROOM(cur_local_pos, local_buf, sizeof(UNICODE_STRING)); mod = (PUNICODE_STRING)cur_local_pos; memset(mod, 0, sizeof(UNICODE_STRING)); cur_local_pos += sizeof(UNICODE_STRING); mod->Buffer = (wchar_t *)(cur_remote_pos + (cur_local_pos - local_buf)); ASSERT_ROOM(cur_local_pos, local_buf, 2 * MAX_PATH + 2 /* plus null */); res = snwprintf((wchar_t *)cur_local_pos, 2 * MAX_PATH, L"%hs", dynamo_path); ASSERT(res > 0); if (res > 0) { cur_local_pos += (2 * res); ASSERT_TRUNCATE(mod->Length, ushort, 2 * res); mod->Length = (ushort)(2 * res); mod->MaximumLength = (ushort)(2 * res); } /* ensure NULL termination, just in case */ *(wchar_t *)cur_local_pos = L'\0'; cur_local_pos += sizeof(wchar_t); /* write to remote process */ num_bytes_in = cur_local_pos - local_buf; if (!nt_write_virtual_memory(phandle, cur_remote_pos, local_buf, num_bytes_in, &num_bytes_out) || num_bytes_out != num_bytes_in) { goto error; } mod_remote = (PUNICODE_STRING)cur_remote_pos; cur_remote_pos += num_bytes_out; /* now write init/takeover func */ cur_local_pos = local_buf; ASSERT_ROOM(cur_local_pos, local_buf, sizeof(ANSI_STRING)); func = (PANSI_STRING)cur_local_pos; memset(func, 0, sizeof(ANSI_STRING)); cur_local_pos += sizeof(ANSI_STRING); func->Buffer = (PCHAR)cur_remote_pos + (cur_local_pos - local_buf); ASSERT_ROOM(cur_local_pos, local_buf, strlen(takeover_func) + 1); strncpy((char *)cur_local_pos, takeover_func, strlen(takeover_func)); cur_local_pos += strlen(takeover_func); ASSERT_TRUNCATE(func->Length, ushort, strlen(takeover_func)); func->Length = (ushort)strlen(takeover_func); func->MaximumLength = (ushort)strlen(takeover_func); *cur_local_pos++ = '\0'; /* ensure NULL termination, just in case */ /* write to remote_process */ num_bytes_in = cur_local_pos - local_buf; if (!nt_write_virtual_memory(phandle, cur_remote_pos, local_buf,
num_bytes_in, &num_bytes_out) || num_bytes_out != num_bytes_in) { goto error; } func_remote = (PANSI_STRING)cur_remote_pos; cur_remote_pos += num_bytes_out; /* now make data page read only */ res = nt_remote_protect_virtual_memory(phandle, remote_data_buffer, PAGE_SIZE, PAGE_READONLY, &old_prot); ASSERT(res); #define INSERT_INT(value) RAW_INSERT_INT32(cur_local_pos, value) #define INSERT_ADDR(value) \ *(ptr_int_t *)cur_local_pos = (ptr_int_t)(value); \ cur_local_pos += sizeof(ptr_int_t) #ifdef X64 # define INSERT_PUSH_ALL_REG() \ *cur_local_pos++ = PUSH_EAX; \ *cur_local_pos++ = PUSH_ECX; \ *cur_local_pos++ = 0x52; /* xdx */ \ *cur_local_pos++ = 0x53; /* xbx */ \ *cur_local_pos++ = 0x54; /* xsp */ \ *cur_local_pos++ = 0x55; /* xbp */ \ *cur_local_pos++ = 0x56; /* xsi */ \ *cur_local_pos++ = 0x57; /* xdi */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = PUSH_EAX; /* r8 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = PUSH_ECX; /* r9 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x52; /* r10 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x53; /* r11 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x54; /* r12 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x55; /* r13 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x56; /* r14 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x57; /* r15 */ #else # define INSERT_PUSH_ALL_REG() *cur_local_pos++ = PUSHA #endif #ifdef X64 # define INSERT_POP_ALL_REG() \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x5f; /* r15 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x5e; /* r14 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x5d; /* r13 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x5c; /* r12 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x5b; /* r11 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = 0x5a; /* r10 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = POP_ECX; /* r9 */ \ *cur_local_pos++ = REX_B; \ *cur_local_pos++ = POP_EAX; /* r8 */ \ *cur_local_pos++ = 0x5f; /* xdi */ \ *cur_local_pos++ = 0x5e; /* xsi */ \ *cur_local_pos++ = 0x5d; /* xbp */ \ *cur_local_pos++ = 0x5b; /* xsp slot but popped into dead xbx */ \ *cur_local_pos++ = 0x5b; /* xbx */ \ *cur_local_pos++ = 0x5a; /* xdx */ \ *cur_local_pos++ = POP_ECX; \ *cur_local_pos++ = POP_EAX #else # define INSERT_POP_ALL_REG() *cur_local_pos++ = POPA #endif #define PUSH_IMMEDIATE(value) RAW_PUSH_INT32(cur_local_pos, value) #define PUSH_SHORT_IMMEDIATE(value) \ *cur_local_pos++ = PUSH_IMM8; \ *cur_local_pos++ = value #ifdef X64 # define PUSH_PTRSZ_IMMEDIATE(value) RAW_PUSH_INT64(cur_local_pos, value) #else # define PUSH_PTRSZ_IMMEDIATE(value) PUSH_IMMEDIATE(value) #endif #define MOV_ESP_TO_EAX() \ IF_X64(*cur_local_pos++ = REX_W;) \ *cur_local_pos++ = MOV_RM32_2_REG32; \ *cur_local_pos++ = MOV_ESP_2_EAX_RM #ifdef X64 /* mov rax -> rcx */ # define MOV_EAX_TO_PARAM_0() \ *cur_local_pos++ = REX_W; \ *cur_local_pos++ = MOV_RM32_2_REG32; \ *cur_local_pos++ = MOV_EAX_2_ECX_RM /* mov rax -> rdx */ # define MOV_EAX_TO_PARAM_1() \ *cur_local_pos++ = REX_W; \ *cur_local_pos++ = MOV_RM32_2_REG32; \ *cur_local_pos++ = MOV_EAX_2_EDX_RM /* mov rax -> r8 */ # define MOV_EAX_TO_PARAM_2() \ *cur_local_pos++ = REX_R | REX_W; \ *cur_local_pos++ = MOV_RM32_2_REG32; \ *cur_local_pos++ = MOV_EAX_2_EAX_RM /* mov rax -> r9 */ # define MOV_EAX_TO_PARAM_3() \ *cur_local_pos++ = REX_R | REX_W; \ *cur_local_pos++ = MOV_RM32_2_REG32; \ *cur_local_pos++ = MOV_EAX_2_ECX_RM /* mov (rsp) -> rcx */ # define MOV_TOS_TO_PARAM_0() \ *cur_local_pos++ = REX_W; \ 
*cur_local_pos++ = 0x8b; \ *cur_local_pos++ = 0x0c; \ *cur_local_pos++ = 0x24 /* mov (rsp) -> rdx */ # define MOV_TOS_TO_PARAM_1() \ *cur_local_pos++ = REX_W; \ *cur_local_pos++ = 0x8b; \ *cur_local_pos++ = 0x14; \ *cur_local_pos++ = 0x24 /* mov (rsp) -> r8 */ # define MOV_TOS_TO_PARAM_2() \ *cur_local_pos++ = REX_R | REX_W; \ *cur_local_pos++ = 0x8b; \ *cur_local_pos++ = 0x04; \ *cur_local_pos++ = 0x24 /* mov (rsp) -> r9 */ # define MOV_TOS_TO_PARAM_3() \ *cur_local_pos++ = REX_R | REX_W; \ *cur_local_pos++ = 0x8b; \ *cur_local_pos++ = 0x0c; \ *cur_local_pos++ = 0x24 #endif /* X64 */ /* FIXME - all values are small use imm8 version */ #define ADD_TO_EAX(value) \ IF_X64(*cur_local_pos++ = REX_W;) \ *cur_local_pos++ = ADD_EAX_IMM32; \ INSERT_INT(value) #define ADD_IMM8_TO_ESP(value) \ IF_X64(*cur_local_pos++ = REX_W;) \ *cur_local_pos++ = 0x83; \ *cur_local_pos++ = 0xc4; \ *cur_local_pos++ = (byte)(value); #define CMP_TO_EAX(value) \ IF_X64(*cur_local_pos++ = REX_W;) \ *cur_local_pos++ = CMP_EAX_IMM32; \ INSERT_INT(value) #define INSERT_REL32_ADDRESS(target) \ IF_X64(ASSERT_NOT_IMPLEMENTED(REL32_REACHABLE( \ ((cur_local_pos - local_buf) + 4) + cur_remote_pos, (byte *)(target)))); \ INSERT_INT((int)(ptr_int_t)((byte *)target - \ (((cur_local_pos - local_buf) + 4) + cur_remote_pos))) #ifdef X64 /* for reachability, go through eax, which should be dead */ # define CALL(target_func) \ *cur_local_pos++ = REX_W; \ *cur_local_pos++ = MOV_IMM_XAX; \ INSERT_ADDR(target_func); \ *cur_local_pos++ = CALL_RM32; \ *cur_local_pos++ = CALL_EAX_RM; #else # define CALL(target_func) \ *cur_local_pos++ = CALL_REL32; \ INSERT_REL32_ADDRESS(target_func) #endif /* X64 */ /* ecx will hold OldProtection afterwards */ /* for x64 we need the 4 stack slots anyway so we do the pushes */ /* on x64, up to caller to have rsp aligned to 16 prior to calling this macro */ #define PROT_IN_ECX 0xbad5bad /* doesn't match a PAGE_* define */ #define CHANGE_PROTECTION(start, size, new_protection) \ *cur_local_pos++ = PUSH_EAX; /* OldProtect slot */ \ MOV_ESP_TO_EAX(); /* get &OldProtect */ \ PUSH_PTRSZ_IMMEDIATE(ALIGN_FORWARD(start + size, PAGE_SIZE) - \ ALIGN_BACKWARD(start, PAGE_SIZE)); /* ProtectSize */ \ PUSH_PTRSZ_IMMEDIATE(ALIGN_BACKWARD(start, PAGE_SIZE)); /* BaseAddress */ \ *cur_local_pos++ = PUSH_EAX; /* arg 5 &OldProtect */ \ if (new_protection == PROT_IN_ECX) { \ *cur_local_pos++ = PUSH_ECX; /* arg 4 NewProtect */ \ } else { \ PUSH_IMMEDIATE(new_protection); /* arg 4 NewProtect */ \ } \ IF_X64(MOV_TOS_TO_PARAM_3()); \ ADD_TO_EAX(-(int)XSP_SZ); /* get &ProtectSize */ \ *cur_local_pos++ = PUSH_EAX; /* arg 3 &ProtectSize */ \ IF_X64(MOV_EAX_TO_PARAM_2()); \ ADD_TO_EAX(-(int)XSP_SZ); /* get &BaseAddress */ \ *cur_local_pos++ = PUSH_EAX; /* arg 2 &BaseAddress */ \ IF_X64(MOV_EAX_TO_PARAM_1()); \ PUSH_IMMEDIATE((int)(ptr_int_t)NT_CURRENT_PROCESS); /* arg ProcessHandle */ \ IF_X64(MOV_TOS_TO_PARAM_0()); \ CALL(NtProtectVirtualMemory); /* 8 pushes => still aligned to 16 */ \ /* no error checking, can't really do anything about it, FIXME */ \ /* stdcall so just the three slots we made for the ptr arguments \ * left on the stack for 32-bit */ \ IF_X64(ADD_IMM8_TO_ESP(5 * XSP_SZ)); /* clean up 5 slots */ \ *cur_local_pos++ = POP_ECX; /* pop BaseAddress */ \ *cur_local_pos++ = POP_ECX; /* pop ProtectSize */ \ *cur_local_pos++ = POP_ECX /* pop OldProtect into ecx */ /* write code */ /* xref case 3821, first call to a possibly hooked routine should be * more than 5 bytes into the page, which is satisfied (though it is not * clear if
any hookers would manage to get in first). */ cur_remote_pos = remote_code_buffer; cur_local_pos = local_buf; hook_target = cur_remote_pos; /* for inject_location INJECT_LOCATION_Ldr* we stick the address used * at the start of the code for the child's use */ if (INJECT_LOCATION_IS_LDR(inject_location)) { INSERT_ADDR(inject_address); hook_target = cur_remote_pos + sizeof(ptr_int_t); /* skip the address */ } #if DEBUG_LOOP *cur_local_pos++ = JMP_REL8; *cur_local_pos++ = 0xfe; #endif /* save current state */ INSERT_PUSH_ALL_REG(); *cur_local_pos++ = PUSHF; /* restore trampoline, first make writable */ CHANGE_PROTECTION(hook_location, EARLY_INJECT_HOOK_SIZE, PAGE_EXECUTE_READWRITE); /* put target in xax to ensure we can reach it */ IF_X64(*cur_local_pos++ = REX_W); *cur_local_pos++ = MOV_IMM_XAX; INSERT_ADDR(hook_location); for (i = 0; i < EARLY_INJECT_HOOK_SIZE / 4; i++) { /* restore bytes 4*i..4*i+3 of hook */ *cur_local_pos++ = MOV_IMM32_2_RM32; *cur_local_pos++ = MOV_deref_disp8_EAX_2_EAX_RM; *cur_local_pos++ = (byte)i * 4; INSERT_INT(*((int *)hook_buf + i)); } for (i = i * 4; i < EARLY_INJECT_HOOK_SIZE; i++) { /* restore byte i of hook */ *cur_local_pos++ = MOV_IMM8_2_RM8; *cur_local_pos++ = MOV_deref_disp8_EAX_2_EAX_RM; *cur_local_pos++ = (byte)i; *cur_local_pos++ = hook_buf[i]; } /* hook restored, restore protection */ CHANGE_PROTECTION(hook_location, EARLY_INJECT_HOOK_SIZE, PROT_IN_ECX); if (inject_location == INJECT_LOCATION_KiUserException) { /* Making the first page of the image unreadable triggers an exception * too early to use the loader; we might try pointing the import table ptr * to bad memory instead (TOTRY); whatever we do, it should be fixed up here */ ASSERT_NOT_IMPLEMENTED(false); } /* call LdrLoadDll to load dr library */ *cur_local_pos++ = PUSH_EAX; /* need slot for OUT hmodule*/ MOV_ESP_TO_EAX(); IF_X64(*cur_local_pos++ = PUSH_EAX); /* extra slot to align to 16 for call */ *cur_local_pos++ = PUSH_EAX; /* arg 4 OUT *hmodule */ IF_X64(MOV_EAX_TO_PARAM_3()); /* XXX: these push-ptrsz, mov-tos sequences are inefficient, but simpler * for cross-platform */ PUSH_PTRSZ_IMMEDIATE((ptr_int_t)mod_remote); /* our library name */ IF_X64(MOV_TOS_TO_PARAM_2()); PUSH_SHORT_IMMEDIATE(0x0); /* Flags OPTIONAL */ IF_X64(MOV_TOS_TO_PARAM_1()); PUSH_SHORT_IMMEDIATE(0x0); /* PathToFile OPTIONAL */ IF_X64(MOV_TOS_TO_PARAM_0()); CALL(LdrLoadDll); /* see signature at declaration above */ IF_X64(ADD_IMM8_TO_ESP(5 * XSP_SZ)); /* clean up 5 slots */ /* stdcall removed the args, so the top of stack is now the slot containing * the returned handle. Use LdrGetProcedureAddress to get the address of the * dr init and takeover function. It is ok to call even if LdrLoadDll failed, * so we check for errors afterwards.
*/ *cur_local_pos++ = POP_ECX; /* dr module handle */ *cur_local_pos++ = PUSH_ECX; /* need slot for out ProcedureAddress */ MOV_ESP_TO_EAX(); IF_X64(*cur_local_pos++ = PUSH_EAX); /* extra slot to align to 16 for call */ *cur_local_pos++ = PUSH_EAX; /* arg 4 OUT *ProcedureAddress */ IF_X64(MOV_EAX_TO_PARAM_3()); PUSH_SHORT_IMMEDIATE(0x0); /* Ordinal OPTIONAL */ IF_X64(MOV_TOS_TO_PARAM_2()); PUSH_PTRSZ_IMMEDIATE((ptr_int_t)func_remote); /* func name */ IF_X64(MOV_TOS_TO_PARAM_1()); *cur_local_pos++ = PUSH_ECX; /* module handle */ IF_X64(MOV_TOS_TO_PARAM_0()); /* for x64, aligned at LdrLoadDll - 5 - 1 + 6 => aligned here */ CALL(LdrGetProcedureAddress); /* see signature at declaration above */ IF_X64(ADD_IMM8_TO_ESP(5 * XSP_SZ)); /* clean up 5 slots */ /* Top of stack is now the dr init and takeover function (stdcall removed * args). Check for errors and bail (FIXME debug build report somehow?) */ CMP_TO_EAX(STATUS_SUCCESS); *cur_local_pos++ = POP_EAX; /* dr init_and_takeover function */ *cur_local_pos++ = JNZ_REL8; /* FIXME - should check >= 0 instead? */ jmp_fixup1 = cur_local_pos++; /* jmp to after call below */ /* Xref case 8373, LdrGetProcedureAdderss sometimes returns an * address of 0xffbadd11 even though it returned STATUS_SUCCESS */ CMP_TO_EAX((int)GET_PROC_ADDR_BAD_ADDR); *cur_local_pos++ = JZ_REL8; /* JZ == JE */ jmp_fixup2 = cur_local_pos++; /* jmp to after call below */ IF_X64(ADD_IMM8_TO_ESP(-2 * (int)XSP_SZ)); /* need 4 slots total */ PUSH_PTRSZ_IMMEDIATE((ptr_int_t)remote_code_buffer); /* arg to takeover func */ IF_X64(MOV_TOS_TO_PARAM_1()); PUSH_IMMEDIATE(inject_location); /* arg to takeover func */ IF_X64(MOV_TOS_TO_PARAM_0()); /* for x64, 2 pushes => aligned to 16 */ *cur_local_pos++ = CALL_RM32; /* call EAX */ *cur_local_pos++ = CALL_EAX_RM; #ifdef X64 IF_X64(ADD_IMM8_TO_ESP(4 * XSP_SZ)); /* clean up 4 slots */ #else *cur_local_pos++ = POP_ECX; /* cdecl so pop arg */ *cur_local_pos++ = POP_ECX; /* cdecl so pop arg */ #endif /* Now patch the jnz above (if error) to go to here */ ASSERT_TRUNCATE(*jmp_fixup1, byte, cur_local_pos - (jmp_fixup1 + 1)); *jmp_fixup1 = (byte)(cur_local_pos - (jmp_fixup1 + 1)); /* target of jnz */ ASSERT_TRUNCATE(*jmp_fixup2, byte, cur_local_pos - (jmp_fixup2 + 1)); *jmp_fixup2 = (byte)(cur_local_pos - (jmp_fixup2 + 1)); /* target of jz */ *cur_local_pos++ = POPF; INSERT_POP_ALL_REG(); if (inject_location != INJECT_LOCATION_KiUserException) { /* jmp back to the hook location to resume execution */ #ifdef X64 /* ind jmp w/ target rip-rel right after (thus 0 disp) */ *cur_local_pos++ = 0xff; *cur_local_pos++ = 0x25; INSERT_INT(0); INSERT_ADDR(hook_location); #else *cur_local_pos++ = JMP_REL32; INSERT_REL32_ADDRESS(hook_location); #endif } else { /* we triggered the exception, so do an NtContinue back */ /* see callback.c, esp+4 holds CONTEXT ** */ *cur_local_pos++ = POP_EAX; /* EXCEPTION_RECORD ** */ *cur_local_pos++ = POP_EAX; /* CONTEXT ** */ PUSH_SHORT_IMMEDIATE(FALSE); /* arg 2 TestAlert */ IF_X64(MOV_TOS_TO_PARAM_1()); *cur_local_pos++ = MOV_RM32_2_REG32; *cur_local_pos++ = MOV_derefEAX_2_EAX_RM; /* CONTEXT * -> EAX */ *cur_local_pos++ = PUSH_EAX; /* push CONTEXT * (arg 1) */ IF_X64(MOV_EAX_TO_PARAM_0()); IF_X64(ADD_IMM8_TO_ESP(-4 * (int)XSP_SZ)); /* 4 slots */ CALL(NtContinue); /* should never get here, will be zeroed memory so will crash if * we do happen to get here, good enough reporting */ } /* Our emitted code above is much less then the sizeof local_buf, * but we'll add a check here (after the fact so not robust if really * overflowed) 
that we didn't even come close (someon adding large amounts * of code should hit this. FIXME - do better? */ ASSERT_ROOM(cur_local_pos, local_buf, MAX_PATH); num_bytes_in = cur_local_pos - local_buf; if (!nt_write_virtual_memory(phandle, cur_remote_pos, local_buf, num_bytes_in, &num_bytes_out) || num_bytes_out != num_bytes_in) { goto error; } cur_remote_pos += num_bytes_out; /* now make code page rx */ res = nt_remote_protect_virtual_memory(phandle, remote_code_buffer, PAGE_SIZE, PAGE_EXECUTE_READ, &old_prot); ASSERT(res); #undef INSERT_INT #undef PUSH_IMMEDIATE #undef PUSH_SHORT_IMMEDIATE #undef MOV_ESP_TO_EAX #undef ADD_TO_EAX #undef INSERT_REL32_ADDRESS #undef CALL #undef PROT_IN_ECX #undef CHANGE_PROTECTION return hook_target; error: return NULL; } /* make gencode easier to read */ #define APP instrlist_append #define GDC GLOBAL_DCONTEXT #define SWITCH_MODE_DATA_SIZE 4 /* size of 32 bit stack ptr */ #ifdef X64 /* This function is necessary b/c the original logic push the hook location on * the stack and jump to dynamorio. Dynamorio start translating the first * return address and control transfer to it. It then run in translated * mode and when it unwinds the stack at some point it will jump to hook * location(which is pushed on the stack). If the dynamorio is 64 bit the * first return address it will see will be 64 bit and hence when it finds * the 32 bit address on the stack it will treat it as a 64 bit address. * Instead of pushing the hook location on the stack we are pushing the * location of the sequece of code which does a mode switch and jump to * the hook location. */ /* This function genearates the code for mode switch after returning * from dynamorio. local_code_buf is the parent process buf which will * temporarily hold the generated instructions. mode_switch_buf is the * location where the actual switch_code will be stored in the target * process, mode_switch_buf_sz is maximum size for switch code, and * mode_switch_data is the address where the app stack pointer is stored. 
*/ static size_t generate_switch_mode_jmp_to_hook(HANDLE phandle, byte *local_code_buf, byte *mode_switch_buf, byte *hook_location, size_t mode_switch_buf_sz, byte *mode_switch_data) { /* Switch to 32 bit mode * Restore the stack * Jump to the hook location */ byte *pc; instrlist_t ilist; size_t num_bytes_out, sz; uint target; instr_t *jmp = INSTR_CREATE_jmp(GDC, opnd_create_pc((app_pc)hook_location)); instr_t *restore_esp = INSTR_CREATE_mov_ld(GDC, opnd_create_reg(REG_ESP), OPND_CREATE_MEM32(REG_NULL, (int)(size_t)mode_switch_data)); instr_set_x86_mode(jmp, true); instr_set_x86_mode(restore_esp, true); instrlist_init(&ilist); /* We patch the 0 with the correct target location in this function */ APP(&ilist, INSTR_CREATE_push_imm(GDC, OPND_CREATE_INT32(0))); APP(&ilist, INSTR_CREATE_mov_st(GDC, OPND_CREATE_MEM16(REG_RSP, 4), OPND_CREATE_INT16((ushort)CS32_SELECTOR))); APP(&ilist, INSTR_CREATE_jmp_far_ind(GDC, opnd_create_base_disp(REG_RSP, REG_NULL, 0, 0, OPSZ_6))); APP(&ilist, restore_esp); APP(&ilist, jmp); pc = instrlist_encode_to_copy(GDC, &ilist, local_code_buf, mode_switch_buf, local_code_buf + mode_switch_buf_sz, true /*has instr targets*/); ASSERT(pc != NULL && pc < local_code_buf + mode_switch_buf_sz); /* Calculate the offset of first instruction after switching * to x86 mode */ sz = (size_t)(pc - local_code_buf - instr_length(GDC, jmp) - instr_length(GDC, restore_esp)); instrlist_clear(GDC, &ilist); /* For x86 code the address must be 32 bit */ ASSERT_TRUNCATE(target, uint, (size_t)mode_switch_buf); target = (uint)(size_t)((byte *)mode_switch_buf + sz); /* Patch the operand of push with target of jmp far indirect. * 1 is the size of the opcode of push instruction. */ *(uint *)(local_code_buf + 1) = target; /* FIXME: Need to free this page after jumping to the hook location b/c * after that it is no longer necessary */ sz = (size_t)(pc - local_code_buf); /* copy local buffer to child process */ if (!write_remote_memory_maybe64(phandle, (uint64)mode_switch_buf, local_code_buf, pc - local_code_buf, &num_bytes_out) || num_bytes_out != sz) { return false; } return sz; } #endif static uint64 find_remote_ntdll_base(HANDLE phandle, bool find64bit) { MEMORY_BASIC_INFORMATION64 mbi; uint64 got; NTSTATUS res; uint64 addr = 0; char name[MAXIMUM_PATH]; do { res = remote_query_virtual_memory_maybe64(phandle, addr, &mbi, sizeof(mbi), &got); if (got != sizeof(mbi) || !NT_SUCCESS(res)) break; #if VERBOSE print_file(STDERR, "0x%I64x-0x%I64x type=0x%x state=0x%x\n", mbi.BaseAddress, mbi.BaseAddress + mbi.RegionSize, mbi.Type, mbi.State); #endif if (mbi.Type == MEM_IMAGE && mbi.BaseAddress == mbi.AllocationBase) { bool is_64; if (get_remote_dll_short_name(phandle, mbi.BaseAddress, name, BUFFER_SIZE_ELEMENTS(name), &is_64)) { #if VERBOSE print_file(STDERR, "found |%s| @ 0x%I64x 64=%d\n", name, mbi.BaseAddress, is_64); #endif if (strcmp(name, "ntdll.dll") == 0 && BOOLS_MATCH(find64bit, is_64)) return mbi.BaseAddress; } } if (addr + mbi.RegionSize < addr) break; addr += mbi.RegionSize; } while (true); return 0; } static uint64 inject_gencode_mapped_helper(HANDLE phandle, char *dynamo_path, uint64 hook_location, byte hook_buf[EARLY_INJECT_HOOK_SIZE], byte *map, void *must_reach, bool x86_code, bool late_injection) { uint64 remote_code_buf = 0, remote_data; byte *local_code_buf = NULL; uint64 pc; uint64 hook_code_buf = 0; const size_t remote_alloc_sz = 2 * PAGE_SIZE; /* one code, one data */ const size_t code_alloc_sz = PAGE_SIZE; size_t hook_code_sz = PAGE_SIZE; uint64 switch_code_location = 
hook_location; #ifdef X64 byte *mode_switch_buf = NULL; byte *mode_switch_data = NULL; size_t switch_code_sz = PAGE_SIZE; static const size_t switch_data_sz = SWITCH_MODE_DATA_SIZE; #endif size_t num_bytes_out; uint old_prot; earliest_args_t args; int i; bool target_64 = !x86_code IF_X64(|| DYNAMO_OPTION(inject_x64)); /* Generate code and data. */ /* We only support low-address remote allocations. */ IF_NOT_X64(ASSERT(!target_64 || must_reach == NULL)); remote_code_buf = (uint64)allocate_remote_code_buffer(phandle, remote_alloc_sz, must_reach); if (remote_code_buf == 0) goto error; /* we can't use heap_mmap() in drinjectlib */ local_code_buf = allocate_remote_code_buffer(NT_CURRENT_PROCESS, code_alloc_sz, NULL); hook_code_buf = remote_code_buf; remote_data = remote_code_buf + code_alloc_sz; ASSERT(sizeof(args) < PAGE_SIZE); #ifdef X64 if (x86_code && DYNAMO_OPTION(inject_x64)) { mode_switch_buf = (byte *)remote_code_buf; switch_code_location = (uint64)mode_switch_buf; mode_switch_data = (byte *)remote_data; remote_data += switch_data_sz; switch_code_sz = generate_switch_mode_jmp_to_hook( phandle, local_code_buf, mode_switch_buf, (byte *)hook_location, switch_code_sz, mode_switch_data); if (!switch_code_sz || switch_code_sz == PAGE_SIZE) goto error; hook_code_sz -= switch_code_sz; hook_code_buf += switch_code_sz; } #endif /* see below on why it's easier to point at args in memory */ args.dr_base = (uint64)map; args.ntdll_base = find_remote_ntdll_base(phandle, target_64); if (args.ntdll_base == 0) goto error; args.tofree_base = remote_code_buf; args.hook_location = hook_location; args.late_injection = late_injection; strncpy(args.dynamorio_lib_path, dynamo_path, BUFFER_SIZE_ELEMENTS(args.dynamorio_lib_path)); NULL_TERMINATE_BUFFER(args.dynamorio_lib_path); if (!write_remote_memory_maybe64(phandle, remote_data, &args, sizeof(args), &num_bytes_out) || num_bytes_out != sizeof(args)) { goto error; } /* We would prefer to use IR to generate our instructions, but we need to support * creating 64-bit code from 32-bit DR. XXX i#1684: Once we have multi-arch * cross-bitwidth IR support from a single build, switch this back to using IR. */ byte *cur_local_pos = local_code_buf; #ifdef X64 if (x86_code && DYNAMO_OPTION(inject_x64)) { /* Mode Switch from 32 bit to 64 bit. * Forward align stack. */ *cur_local_pos++ = MOV_REG32_2_RM32; *cur_local_pos++ = 0x24; *cur_local_pos++ = 0x25; RAW_INSERT_INT32(cur_local_pos, mode_switch_data); /* Far jmp to next instr. */ const int far_jmp_len = 7; byte *pre_jmp = cur_local_pos; *cur_local_pos++ = JMP_FAR_DIRECT; RAW_INSERT_INT32(cur_local_pos, pre_jmp + far_jmp_len); RAW_INSERT_INT16(cur_local_pos, CS64_SELECTOR); ASSERT(cur_local_pos == pre_jmp + far_jmp_len); /* Align stack. */ *cur_local_pos++ = AND_RM32_IMM32; *cur_local_pos++ = 0xe4; RAW_INSERT_INT32(cur_local_pos, -8); } #endif /* Save xax, which we clobber below. It is live for INJECT_LOCATION_ThreadStart. * We write it into earliest_args_t.app_xax, and in dynamorio_earliest_init_takeover * we use the saved value to update the PUSHGRP pushed xax. */ if (target_64) *cur_local_pos++ = REX_W; *cur_local_pos++ = MOV_REG32_2_RM32; *cur_local_pos++ = MOV_IMM_RM_ABS; uint64 cur_remote_pos = remote_code_buf + (cur_local_pos - local_code_buf); RAW_INSERT_INT32(cur_local_pos, target_64 ? (remote_data - (cur_remote_pos + sizeof(int))) : remote_data); /* Restore hook rather than trying to pass contents to C code * (we leave hooked page writable for this and C code restores). 
*/ if (target_64) *cur_local_pos++ = REX_W; *cur_local_pos++ = MOV_IMM_XAX; if (target_64) RAW_INSERT_INT64(cur_local_pos, hook_location); else RAW_INSERT_INT32(cur_local_pos, hook_location); for (i = 0; i < EARLY_INJECT_HOOK_SIZE / 4; i++) { /* Restore bytes 4*i..4*i+3 of the hook. */ *cur_local_pos++ = MOV_IMM32_2_RM32; *cur_local_pos++ = MOV_deref_disp8_EAX_2_EAX_RM; RAW_INSERT_INT8(cur_local_pos, i * 4); RAW_INSERT_INT32(cur_local_pos, *((int *)hook_buf + i)); } for (i = i * 4; i < EARLY_INJECT_HOOK_SIZE; i++) { /* Restore byte i of the hook. */ *cur_local_pos++ = MOV_IMM8_2_RM8; *cur_local_pos++ = MOV_deref_disp8_EAX_2_EAX_RM; RAW_INSERT_INT8(cur_local_pos, i); RAW_INSERT_INT8(cur_local_pos, (char)hook_buf[i]); } /* Call DR earliest-takeover routine w/ retaddr pointing at hooked * location. DR will free remote_code_buf. * If we passed regular args to a C routine, we'd clobber the args to * the routine we hooked. We would then need to return here to restore, * it would be more complicated to free remote_code_buf, and we'd want * dr_insert_call() in drdecodelib, etc. So we instead only touch * xax here and we target an asm routine in DR that will preserve the * other regs, enabling returning to the hooked routine w/ the * original state (except xax which is scratch and xbx which kernel * isn't counting on of course). * We pass our args in memory pointed at by xax stored in the 2nd page. */ if (target_64) *cur_local_pos++ = REX_W; *cur_local_pos++ = MOV_IMM_XAX; if (target_64) RAW_INSERT_INT64(cur_local_pos, remote_data); else RAW_INSERT_INT32(cur_local_pos, remote_data); /* We can't use dr_insert_call() b/c it's not avail in drdecode for drinject, * and its main value is passing params and we can't use regular param regs. * we don't even want the 4 stack slots for x64 here b/c we don't want to * clean them up. */ if (target_64) RAW_PUSH_INT64(cur_local_pos, switch_code_location); else RAW_PUSH_INT32(cur_local_pos, switch_code_location); pc = get_remote_proc_address(phandle, (uint64)map, "dynamorio_earliest_init_takeover"); if (pc == 0) goto error; if (REL32_REACHABLE((int64)pc, (int64)hook_code_buf) && /* over-estimate to be sure: we assert below we're < PAGE_SIZE */ REL32_REACHABLE((int64)pc, (int64)remote_code_buf + PAGE_SIZE)) { *cur_local_pos++ = JMP_REL32; cur_remote_pos = remote_code_buf + (cur_local_pos - local_code_buf); RAW_INSERT_INT32(cur_local_pos, (int64)pc - (int64)(cur_remote_pos + sizeof(int))); } else { /* Indirect through an inlined target. */ *cur_local_pos++ = JMP_ABS_IND64_OPCODE; *cur_local_pos++ = JMP_ABS_MEM_IND64_MODRM; cur_remote_pos = remote_code_buf + (cur_local_pos - local_code_buf); RAW_INSERT_INT32(cur_local_pos, target_64 ? 
0 : cur_remote_pos + sizeof(int)); if (target_64) RAW_INSERT_INT64(cur_local_pos, pc); else RAW_INSERT_INT32(cur_local_pos, pc); } ASSERT(cur_local_pos - local_code_buf <= (ssize_t)hook_code_sz); /* copy local buffer to child process */ if (!write_remote_memory_maybe64(phandle, hook_code_buf, local_code_buf, cur_local_pos - local_code_buf, &num_bytes_out) || num_bytes_out != (size_t)(cur_local_pos - local_code_buf)) { goto error; } if (!remote_protect_virtual_memory_maybe64(phandle, remote_code_buf, remote_alloc_sz, PAGE_EXECUTE_READWRITE, &old_prot)) { ASSERT_NOT_REACHED(); goto error; } free_remote_code_buffer(NT_CURRENT_PROCESS, local_code_buf); return hook_code_buf; error: if (local_code_buf != NULL) free_remote_code_buffer(NT_CURRENT_PROCESS, local_code_buf); if (remote_code_buf != 0) free_remote_code_buffer(phandle, (byte *)(ptr_int_t)remote_code_buf); return 0; } /* i#234: earliest injection so we see every single user-mode instruction * Supports a 64-bit child of a 32-bit DR. * XXX i#625: not supporting rebasing: assuming no conflict w/ executable. */ static uint64 inject_gencode_mapped(HANDLE phandle, char *dynamo_path, uint64 hook_location, byte hook_buf[EARLY_INJECT_HOOK_SIZE], void *must_reach, bool x86_code, bool late_injection) { bool success = false; NTSTATUS res; HANDLE file = INVALID_HANDLE_VALUE; HANDLE section = INVALID_HANDLE_VALUE; byte *map = NULL; size_t view_size = 0; wchar_t dllpath[MAX_PATH]; uint64 ret = 0; /* map DR dll into child * * FIXME i#625: check memory in child for conflict w/ DR from executable * (PEB->ImageBaseAddress doesn't seem to be set by kernel so how * locate executable easily?) and fall back to late injection. * Eventually we'll have to support rebasing from parent, or from * contains-no-relocation code in DR. */ if (!convert_to_NT_file_path(dllpath, dynamo_path, BUFFER_SIZE_ELEMENTS(dllpath))) goto done; NULL_TERMINATE_BUFFER(dllpath); res = nt_create_module_file(&file, dllpath, NULL, FILE_EXECUTE | FILE_READ_DATA, FILE_ATTRIBUTE_NORMAL, FILE_SHARE_READ, FILE_OPEN, 0); if (!NT_SUCCESS(res)) goto done; res = nt_create_section(&section, SECTION_ALL_ACCESS, NULL, /* full file size */ PAGE_EXECUTE_WRITECOPY, SEC_IMAGE, file, /* XXX: do we need security options to put in other process?*/ NULL /* unnamed */, 0, NULL, NULL); if (!NT_SUCCESS(res)) goto done; /* For 32-into-64, there's no NtWow64 version so we rely on this simply mapping * into the low 2G. */ res = nt_raw_MapViewOfSection(section, phandle, &map, 0, 0 /* not page-file-backed */, NULL, (PSIZE_T)&view_size, ViewUnmap, 0 /* no special top-down or anything */, PAGE_EXECUTE_WRITECOPY); if (!NT_SUCCESS(res)) goto done; ret = inject_gencode_mapped_helper(phandle, dynamo_path, hook_location, hook_buf, map, must_reach, x86_code, late_injection); done: if (ret == 0) { close_handle(file); close_handle(section); } return ret; } /* Early injection. */ /* XXX: Like inject_into_thread we assume esp, but we could allocate our * own stack in the child and swap to that for transparency. */ bool inject_into_new_process(HANDLE phandle, HANDLE thandle, char *dynamo_path, bool map, uint inject_location, void *inject_address) { /* To handle a 64-bit child of a 32-bit DR we use "uint64" for remote addresses. */ uint64 hook_target = 0; uint64 hook_location = 0; uint old_prot; size_t num_bytes_out; byte hook_buf[EARLY_INJECT_HOOK_SIZE]; bool x86_code = false; bool late_injection = false; uint64 image_entry = 0; union { /* Ensure we're not using too much stack via a union. 
*/ CONTEXT cxt; #ifndef X64 CONTEXT_64 cxt64; #endif } cxt; /* Possible child hook points */ GET_NTDLL(KiUserApcDispatcher, (IN PVOID Unknown1, IN PVOID Unknown2, IN PVOID Unknown3, IN PVOID ContextStart, IN PVOID ContextBody)); GET_NTDLL(KiUserExceptionDispatcher, (IN PVOID Unknown1, IN PVOID Unknown2)); switch (inject_location) { case INJECT_LOCATION_LdrLoadDll: case INJECT_LOCATION_LdrpLoadDll: case INJECT_LOCATION_LdrCustom: case INJECT_LOCATION_LdrpLoadImportModule: case INJECT_LOCATION_LdrDefault: /* caller provides the ldr address to use */ ASSERT(inject_address != NULL); hook_location = (uint64)inject_address; if (hook_location == 0) { goto error; } break; case INJECT_LOCATION_KiUserApc: { /* FIXME i#234 NYI: for wow64 need to hook ntdll64 NtMapViewOfSection */ #ifdef NOT_DYNAMORIO_CORE_PROPER PEB *peb = get_own_peb(); if (peb->OSMajorVersion >= 6) { #else if (get_os_version() >= WINDOWS_VERSION_VISTA) { #endif /* LdrInitializeThunk isn't in our ntdll.lib but it is * exported on 2K+ */ HANDLE ntdll_base = get_module_handle(L"ntdll.dll"); ASSERT(ntdll_base != NULL); hook_location = (uint64)GET_PROC_ADDR(ntdll_base, "LdrInitializeThunk"); ASSERT(hook_location != 0); } else hook_location = (uint64)KiUserApcDispatcher; ASSERT(map); break; } case INJECT_LOCATION_KiUserException: hook_location = (uint64)KiUserExceptionDispatcher; break; case INJECT_LOCATION_ImageEntry: hook_location = get_remote_process_entry(phandle, &x86_code); late_injection = true; break; case INJECT_LOCATION_ThreadStart: late_injection = true; /* Try to get the actual thread context if possible. * We next try looking in the remote ntdll for RtlUserThreadStart. * If we can't find the thread start, we fall back to the image entry, which * is not many instructions later. We also need to call this first to set * "x86_code": */ image_entry = get_remote_process_entry(phandle, &x86_code); if (thandle != NULL) { /* We can get the context for same-bitwidth, or (below) for parent32, * child64. For parent64, child32, a regular query gives us * ntdll64!RtlUserThreadStart, which our gencode can't reach and which * is not actually executed: we'd need a reverse switch_modes_and_call? * For now we rely on the get_remote_proc_address() and assume that's * the thread start for parent64, child32. */ if (IF_X64(!) is_32bit_process(phandle)) { cxt.cxt.ContextFlags = CONTEXT_CONTROL; if (NT_SUCCESS(nt_get_context(thandle, &cxt.cxt))) hook_location = cxt.cxt.CXT_XIP; } #ifndef X64 else { cxt.cxt64.ContextFlags = CONTEXT_CONTROL; if (thread_get_context_64(thandle, &cxt.cxt64)) hook_location = cxt.cxt64.Rip; } #endif } if (hook_location == 0) { bool target_64 = !x86_code IF_X64(|| DYNAMO_OPTION(inject_x64)); uint64 ntdll_base = find_remote_ntdll_base(phandle, target_64); uint64 thread_start = get_remote_proc_address(phandle, ntdll_base, "RtlUserThreadStart"); if (thread_start != 0) hook_location = thread_start; } if (hook_location == 0) { /* Fall back to the image entry which is just a few instructions later. */ hook_location = image_entry; } break; default: ASSERT_NOT_REACHED(); goto error; } /* read in code at hook */ if (!read_remote_memory_maybe64(phandle, hook_location, hook_buf, sizeof(hook_buf), &num_bytes_out) || num_bytes_out != sizeof(hook_buf)) { goto error; } /* Win8 wow64 has ntdll up high but it reserves all the reachable addresses, * so we cannot use a relative jump to reach our code. Rather than have * different hooks for different situations, we just always do an indirect * jump for x64. 
Plus we always save the max size we need for that jump. * We assume there's no other thread this early (already assuming that * anyway) and that we restore the hook before we do anything; plus, the * routines we're hooking are big enough that we won't clobber anything * else. Thus, we pass NULL instead of hook_location for must_reach. */ if (map) { hook_target = inject_gencode_mapped(phandle, dynamo_path, hook_location, hook_buf, NULL, x86_code, late_injection); } else { /* No support for 32-to-64. */ hook_target = (uint64)inject_gencode_at_ldr( phandle, dynamo_path, inject_location, inject_address, (void *)(ptr_int_t)hook_location, hook_buf, NULL); } if (hook_target == 0) goto error; bool skip_hook = false; if (inject_location == INJECT_LOCATION_ThreadStart && hook_location != image_entry && thandle != NULL) { /* XXX i#803: Having a hook at the thread start seems to cause strange * instability. We instead set the thread context, like thread injection * does. We should better understand the problems. * If we successfully set the context, we skip the hook. The gencode * will still write the original instructions on top (a nop). */ if (IF_X64_ELSE(true, is_32bit_process(phandle))) { cxt.cxt.ContextFlags = CONTEXT_CONTROL; if (NT_SUCCESS(nt_get_context(thandle, &cxt.cxt))) { cxt.cxt.CXT_XIP = (ptr_uint_t)hook_target; if (NT_SUCCESS(nt_set_context(thandle, &cxt.cxt))) skip_hook = true; } } #ifndef X64 else { cxt.cxt64.ContextFlags = CONTEXT_CONTROL; if (thread_get_context_64(thandle, &cxt.cxt64)) { cxt.cxt64.Rip = hook_target; if (thread_set_context_64(thandle, &cxt.cxt64)) { skip_hook = true; } } } #endif } if (!skip_hook) { /* Place hook */ if (REL32_REACHABLE((int64)hook_location + 5, (int64)hook_target)) { hook_buf[0] = JMP_REL32; *(int *)(&hook_buf[1]) = (int)((int64)hook_target - ((int64)hook_location + 5)); } else { hook_buf[0] = JMP_ABS_IND64_OPCODE; hook_buf[1] = JMP_ABS_MEM_IND64_MODRM; *(int *)(&hook_buf[2]) = 0; /* rip-rel to following address */ *(uint64 *)(&hook_buf[6]) = hook_target; } } /* Even if skipping we have to mark writable since gencode writes to it. */ if (!remote_protect_virtual_memory_maybe64(phandle, hook_location, sizeof(hook_buf), PAGE_EXECUTE_READWRITE, &old_prot)) { goto error; } if (!write_remote_memory_maybe64(phandle, hook_location, hook_buf, sizeof(hook_buf), &num_bytes_out) || num_bytes_out != sizeof(hook_buf)) { goto error; } if (!map) { /* For map we restore the hook from gencode to avoid having to pass * the displaced code around. But, we can't invoke lib routines easily, * so we can't mark +w from gencode easily: so we just leave it +w * and restore to +rx in dynamorio_earliest_init_takeover_C(). */ if (!remote_protect_virtual_memory_maybe64( phandle, hook_location, sizeof(hook_buf), old_prot, &old_prot)) { goto error; } } return true; error: /* we do not recover any changes in the child's address space */ return false; }
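The tail of inject_into_new_process above chooses between two hook encodings: a 5-byte JMP rel32 when the target fits in a signed 32-bit displacement, and a 14-byte RIP-relative indirect jump (FF 25 with a zero displacement, followed by the absolute 8-byte target) when it does not. A minimal Go sketch of that encoding decision — hookLocation and hookTarget are illustrative values, not DynamoRIO's API:

package main

import (
	"encoding/binary"
	"fmt"
)

// rel32Reachable reports whether target fits in the signed 32-bit
// displacement of a 5-byte JMP placed at src.
func rel32Reachable(src, target uint64) bool {
	disp := int64(target) - (int64(src) + 5)
	return disp >= -(1<<31) && disp < 1<<31
}

// encodeHook builds the bytes written over the hooked location:
// "E9 rel32" when reachable, otherwise "FF 25 00000000" followed by
// the absolute target (an indirect jump through the 8 bytes placed
// right after the instruction).
func encodeHook(hookLocation, hookTarget uint64) []byte {
	if rel32Reachable(hookLocation, hookTarget) {
		buf := make([]byte, 5)
		buf[0] = 0xE9
		binary.LittleEndian.PutUint32(buf[1:], uint32(int32(int64(hookTarget)-(int64(hookLocation)+5))))
		return buf
	}
	buf := make([]byte, 14)
	buf[0], buf[1] = 0xFF, 0x25
	binary.LittleEndian.PutUint32(buf[2:6], 0) // disp32 = 0: target stored just after
	binary.LittleEndian.PutUint64(buf[6:], hookTarget)
	return buf
}

func main() {
	fmt.Printf("near: % x\n", encodeHook(0x7ffe0000, 0x7ffe9000))
	fmt.Printf("far:  % x\n", encodeHook(0x7ffe0000, 0x7f0000001000))
}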
1
24,068
> Note that allocation of vmheap fails upon initializing 64-bit DynamoRIO in WoW64 processes. Thus, we need to pass -reachable_heap to avoid making this allocation. This should be solved by changing the default `vmheap_size` to be much smaller than 8GB for x64 DR inside WOW64.
DynamoRIO-dynamorio
c
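One detail worth calling out from the record above: the CHANGE_PROTECTION macro rounds the hooked range out to page boundaries, passing NtProtectVirtualMemory a BaseAddress of ALIGN_BACKWARD(start, PAGE_SIZE) and a ProtectSize of ALIGN_FORWARD(start + size, PAGE_SIZE) - ALIGN_BACKWARD(start, PAGE_SIZE), so a hook straddling a boundary unprotects every page it touches. The same arithmetic in a self-contained Go sketch (the 4KB page size and sample addresses are assumptions for illustration):

package main

import "fmt"

const pageSize = 0x1000

func alignBackward(x uint64) uint64 { return x &^ (pageSize - 1) }
func alignForward(x uint64) uint64  { return alignBackward(x + pageSize - 1) }

// protRegion mirrors the BaseAddress/ProtectSize computation: the
// region handed to the protection call covers every page that the
// [start, start+size) range touches.
func protRegion(start, size uint64) (base, length uint64) {
	base = alignBackward(start)
	length = alignForward(start+size) - base
	return
}

func main() {
	// A 12-byte hook that straddles a page boundary needs two pages.
	base, length := protRegion(0x401ffa, 12)
	fmt.Printf("base=%#x len=%#x\n", base, length) // base=0x401000 len=0x2000
}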
@@ -200,6 +200,12 @@ module RSpec::Core @exception_presenter.fully_formatted(failure_number, colorizer) end + # @return [Array] The failure information fully formatted in the way that + # RSpec's built-in formatters emit, split by line. + def fully_formatted_lines(failure_number, colorizer=::RSpec::Core::Formatters::ConsoleCodes) + @exception_presenter.fully_formatted_lines(failure_number, colorizer) + end + private def initialize(example, exception_presenter=Formatters::ExceptionPresenter::Factory.new(example).build)
1
RSpec::Support.require_rspec_core "formatters/console_codes" RSpec::Support.require_rspec_core "formatters/exception_presenter" RSpec::Support.require_rspec_core "formatters/helpers" RSpec::Support.require_rspec_core "shell_escape" module RSpec::Core # Notifications are value objects passed to formatters to provide them # with information about a particular event of interest. module Notifications # @private module NullColorizer module_function def wrap(line, _code_or_symbol) line end end # The `StartNotification` represents a notification sent by the reporter # when the suite is started. It contains the expected amount of examples # to be executed, and the load time of RSpec. # # @attr count [Fixnum] the number counted # @attr load_time [Float] the number of seconds taken to boot RSpec # and load the spec files StartNotification = Struct.new(:count, :load_time) # The `ExampleNotification` represents notifications sent by the reporter # which contain information about the current (or soon to be) example. # It is used by formatters to access information about that example. # # @example # def example_started(notification) # puts "Hey I started #{notification.example.description}" # end # # @attr example [RSpec::Core::Example] the current example ExampleNotification = Struct.new(:example) class ExampleNotification # @private def self.for(example) execution_result = example.execution_result return SkippedExampleNotification.new(example) if execution_result.example_skipped? return new(example) unless execution_result.status == :pending || execution_result.status == :failed klass = if execution_result.pending_fixed? PendingExampleFixedNotification elsif execution_result.status == :pending PendingExampleFailedAsExpectedNotification else FailedExampleNotification end klass.new(example) end private_class_method :new end # The `ExamplesNotification` represents notifications sent by the reporter # which contain information about the suites examples. # # @example # def stop(notification) # puts "Hey I ran #{notification.examples.size}" # end # class ExamplesNotification def initialize(reporter) @reporter = reporter end # @return [Array<RSpec::Core::Example>] list of examples def examples @reporter.examples end # @return [Array<RSpec::Core::Example>] list of failed examples def failed_examples @reporter.failed_examples end # @return [Array<RSpec::Core::Example>] list of pending examples def pending_examples @reporter.pending_examples end # @return [Array<RSpec::Core::Notifications::ExampleNotification>] # returns examples as notifications def notifications @notifications ||= format_examples(examples) end # @return [Array<RSpec::Core::Notifications::FailedExampleNotification>] # returns failed examples as notifications def failure_notifications @failed_notifications ||= format_examples(failed_examples) end # @return [Array<RSpec::Core::Notifications::SkippedExampleNotification, # RSpec::Core::Notifications::PendingExampleFailedAsExpectedNotification>] # returns pending examples as notifications def pending_notifications @pending_notifications ||= format_examples(pending_examples) end # @return [String] The list of failed examples, fully formatted in the way # that RSpec's built-in formatters emit. 
def fully_formatted_failed_examples(colorizer=::RSpec::Core::Formatters::ConsoleCodes) formatted = "\nFailures:\n" failure_notifications.each_with_index do |failure, index| formatted << failure.fully_formatted(index.next, colorizer) end formatted end # @return [String] The list of pending examples, fully formatted in the # way that RSpec's built-in formatters emit. def fully_formatted_pending_examples(colorizer=::RSpec::Core::Formatters::ConsoleCodes) formatted = "\nPending: (Failures listed here are expected and do not affect your suite's status)\n" pending_notifications.each_with_index do |notification, index| formatted << notification.fully_formatted(index.next, colorizer) end formatted end private def format_examples(examples) examples.map do |example| ExampleNotification.for(example) end end end # The `FailedExampleNotification` extends `ExampleNotification` with # things useful for examples that have failure info -- typically a # failed or pending spec. # # @example # def example_failed(notification) # puts "Hey I failed :(" # puts "Here's my stack trace" # puts notification.exception.backtrace.join("\n") # end # # @attr [RSpec::Core::Example] example the current example # @see ExampleNotification class FailedExampleNotification < ExampleNotification public_class_method :new # @return [Exception] The example failure def exception @exception_presenter.exception end # @return [String] The example description def description @exception_presenter.description end # Returns the message generated for this failure line by line. # # @return [Array<String>] The example failure message def message_lines @exception_presenter.message_lines end # Returns the message generated for this failure colorized line by line. # # @param colorizer [#wrap] An object to colorize the message_lines by # @return [Array<String>] The example failure message colorized def colorized_message_lines(colorizer=::RSpec::Core::Formatters::ConsoleCodes) @exception_presenter.colorized_message_lines(colorizer) end # Returns the failures formatted backtrace. # # @return [Array<String>] the examples backtrace lines def formatted_backtrace @exception_presenter.formatted_backtrace end # Returns the failures colorized formatted backtrace. # # @param colorizer [#wrap] An object to colorize the message_lines by # @return [Array<String>] the examples colorized backtrace lines def colorized_formatted_backtrace(colorizer=::RSpec::Core::Formatters::ConsoleCodes) @exception_presenter.colorized_formatted_backtrace(colorizer) end # @return [String] The failure information fully formatted in the way that # RSpec's built-in formatters emit. def fully_formatted(failure_number, colorizer=::RSpec::Core::Formatters::ConsoleCodes) @exception_presenter.fully_formatted(failure_number, colorizer) end private def initialize(example, exception_presenter=Formatters::ExceptionPresenter::Factory.new(example).build) @exception_presenter = exception_presenter super(example) end end # @deprecated Use {FailedExampleNotification} instead. class PendingExampleFixedNotification < FailedExampleNotification; end # @deprecated Use {FailedExampleNotification} instead. class PendingExampleFailedAsExpectedNotification < FailedExampleNotification; end # The `SkippedExampleNotification` extends `ExampleNotification` with # things useful for specs that are skipped. 
# # @attr [RSpec::Core::Example] example the current example # @see ExampleNotification class SkippedExampleNotification < ExampleNotification public_class_method :new # @return [String] The pending detail fully formatted in the way that # RSpec's built-in formatters emit. def fully_formatted(pending_number, colorizer=::RSpec::Core::Formatters::ConsoleCodes) formatted_caller = RSpec.configuration.backtrace_formatter.backtrace_line(example.location) colorizer.wrap("\n #{pending_number}) #{example.full_description}", :pending) << "\n " << Formatters::ExceptionPresenter::PENDING_DETAIL_FORMATTER.call(example, colorizer) << "\n" << colorizer.wrap(" # #{formatted_caller}\n", :detail) end end # The `GroupNotification` represents notifications sent by the reporter # which contain information about the currently running (or soon to be) # example group. It is used by formatters to access information about that # group. # # @example # def example_group_started(notification) # puts "Hey I started #{notification.group.description}" # end # @attr group [RSpec::Core::ExampleGroup] the current group GroupNotification = Struct.new(:group) # The `MessageNotification` encapsulates generic messages that the reporter # sends to formatters. # # @attr message [String] the message MessageNotification = Struct.new(:message) # The `SeedNotification` holds the seed used to randomize examples and # whether that seed has been used or not. # # @attr seed [Fixnum] the seed used to randomize ordering # @attr used [Boolean] whether the seed has been used or not SeedNotification = Struct.new(:seed, :used) class SeedNotification # @api # @return [Boolean] has the seed been used? def seed_used? !!used end private :used # @return [String] The seed information fully formatted in the way that # RSpec's built-in formatters emit. def fully_formatted "\nRandomized with seed #{seed}\n" end end # The `SummaryNotification` holds information about the results of running # a test suite. It is used by formatters to provide information at the end # of the test run. # # @attr duration [Float] the time taken (in seconds) to run the suite # @attr examples [Array<RSpec::Core::Example>] the examples run # @attr failed_examples [Array<RSpec::Core::Example>] the failed examples # @attr pending_examples [Array<RSpec::Core::Example>] the pending examples # @attr load_time [Float] the number of seconds taken to boot RSpec # and load the spec files # @attr errors_outside_of_examples_count [Integer] the number of errors that # have occurred processing # the spec suite SummaryNotification = Struct.new(:duration, :examples, :failed_examples, :pending_examples, :load_time, :errors_outside_of_examples_count) class SummaryNotification # @api # @return [Fixnum] the number of examples run def example_count @example_count ||= examples.size end # @api # @return [Fixnum] the number of failed examples def failure_count @failure_count ||= failed_examples.size end # @api # @return [Fixnum] the number of pending examples def pending_count @pending_count ||= pending_examples.size end # @api # @return [String] A line summarising the result totals of the spec run. 
def totals_line summary = Formatters::Helpers.pluralize(example_count, "example") summary << ", " << Formatters::Helpers.pluralize(failure_count, "failure") summary << ", #{pending_count} pending" if pending_count > 0 if errors_outside_of_examples_count > 0 summary << ", " summary << Formatters::Helpers.pluralize(errors_outside_of_examples_count, "error") summary << " occurred outside of examples" end summary end # @api public # # Wraps the results line with colors based on the configured # colors for failure, pending, and success. Defaults to red, # yellow, green accordingly. # # @param colorizer [#wrap] An object which supports wrapping text with # specific colors. # @return [String] A colorized results line. def colorized_totals_line(colorizer=::RSpec::Core::Formatters::ConsoleCodes) if failure_count > 0 colorizer.wrap(totals_line, RSpec.configuration.failure_color) elsif pending_count > 0 colorizer.wrap(totals_line, RSpec.configuration.pending_color) else colorizer.wrap(totals_line, RSpec.configuration.success_color) end end # @api public # # Formats failures into a rerunable command format. # # @param colorizer [#wrap] An object which supports wrapping text with # specific colors. # @return [String] A colorized summary line. def colorized_rerun_commands(colorizer=::RSpec::Core::Formatters::ConsoleCodes) "\nFailed examples:\n\n" + failed_examples.map do |example| colorizer.wrap("rspec #{rerun_argument_for(example)}", RSpec.configuration.failure_color) + " " + colorizer.wrap("# #{example.full_description}", RSpec.configuration.detail_color) end.join("\n") end # @return [String] a formatted version of the time it took to run the # suite def formatted_duration Formatters::Helpers.format_duration(duration) end # @return [String] a formatted version of the time it took to boot RSpec # and load the spec files def formatted_load_time Formatters::Helpers.format_duration(load_time) end # @return [String] The summary information fully formatted in the way that # RSpec's built-in formatters emit. def fully_formatted(colorizer=::RSpec::Core::Formatters::ConsoleCodes) formatted = "\nFinished in #{formatted_duration} " \ "(files took #{formatted_load_time} to load)\n" \ "#{colorized_totals_line(colorizer)}\n" unless failed_examples.empty? formatted << colorized_rerun_commands(colorizer) << "\n" end formatted end private include RSpec::Core::ShellEscape def rerun_argument_for(example) location = example.location_rerun_argument return location unless duplicate_rerun_locations.include?(location) conditionally_quote(example.id) end def duplicate_rerun_locations @duplicate_rerun_locations ||= begin locations = RSpec.world.all_examples.map(&:location_rerun_argument) Set.new.tap do |s| locations.group_by { |l| l }.each do |l, ls| s << l if ls.count > 1 end end end end end # The `ProfileNotification` holds information about the results of running a # test suite when profiling is enabled. It is used by formatters to provide # information at the end of the test run for profiling information. 
# # @attr duration [Float] the time taken (in seconds) to run the suite # @attr examples [Array<RSpec::Core::Example>] the examples run # @attr number_of_examples [Fixnum] the number of examples to profile # @attr example_groups [Array<RSpec::Core::Profiler>] example groups run class ProfileNotification def initialize(duration, examples, number_of_examples, example_groups) @duration = duration @examples = examples @number_of_examples = number_of_examples @example_groups = example_groups end attr_reader :duration, :examples, :number_of_examples # @return [Array<RSpec::Core::Example>] the slowest examples def slowest_examples @slowest_examples ||= examples.sort_by do |example| -example.execution_result.run_time end.first(number_of_examples) end # @return [Float] the time taken (in seconds) to run the slowest examples def slow_duration @slow_duration ||= slowest_examples.inject(0.0) do |i, e| i + e.execution_result.run_time end end # @return [String] the percentage of total time taken def percentage @percentage ||= begin time_taken = slow_duration / duration '%.1f' % ((time_taken.nan? ? 0.0 : time_taken) * 100) end end # @return [Array<RSpec::Core::Example>] the slowest example groups def slowest_groups @slowest_groups ||= calculate_slowest_groups end private def calculate_slowest_groups # stop if we've only one example group return {} if @example_groups.keys.length <= 1 @example_groups.each_value do |hash| hash[:average] = hash[:total_time].to_f / hash[:count] end groups = @example_groups.sort_by { |_, hash| -hash[:average] }.first(number_of_examples) groups.map { |group, data| [group.location, data] } end end # The `DeprecationNotification` is issued by the reporter when a deprecated # part of RSpec is encountered. It represents information about the # deprecated call site. # # @attr message [String] A custom message about the deprecation # @attr deprecated [String] A custom message about the deprecation (alias of # message) # @attr replacement [String] An optional replacement for the deprecation # @attr call_site [String] An optional call site from which the deprecation # was issued DeprecationNotification = Struct.new(:deprecated, :message, :replacement, :call_site) class DeprecationNotification private_class_method :new # @api # Convenience way to initialize the notification def self.from_hash(data) new data[:deprecated], data[:message], data[:replacement], data[:call_site] end end # `NullNotification` represents a placeholder value for notifications that # currently require no information, but we may wish to extend in future. class NullNotification end # `CustomNotification` is used when sending custom events to formatters / # other registered listeners, it creates attributes based on supplied hash # of options. class CustomNotification < Struct # @param options [Hash] A hash of method / value pairs to create on this notification # @return [CustomNotification] # # Build a custom notification based on the supplied option key / values. def self.for(options={}) return NullNotification if options.keys.empty? new(*options.keys).new(*options.values) end end end end
1
16,459
Is it really worth expanding our public API for this? After all, isn't calling this the same as calling `notification.fully_formatted(...).lines`? If so, I'd rather not widen our API (and thus, increase our maintenance burden) when it's so simple to get all the lines already.
rspec-rspec-core
rb
@@ -14,18 +14,10 @@ */ package com.google.api.codegen; -/** - * A SnippetSetRunner takes the element, snippet file, and context as input and then uses the - * Snippet Set templating engine to generate an output document. - */ +/** Defines the SNIPPET_RESOURCE_ROOT. */ public final class SnippetSetRunner { /** The path to the root of snippet resources. */ public static final String SNIPPET_RESOURCE_ROOT = SnippetSetRunner.class.getPackage().getName().replace('.', '/'); - - public interface Generator<Element> { - /** Runs the code generation. */ - GeneratedResult generate(Element element, String snippetFileName, CodegenContext context); - } }
1
/* Copyright 2016 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen; /** * A SnippetSetRunner takes the element, snippet file, and context as input and then uses the * Snippet Set templating engine to generate an output document. */ public final class SnippetSetRunner { /** The path to the root of snippet resources. */ public static final String SNIPPET_RESOURCE_ROOT = SnippetSetRunner.class.getPackage().getName().replace('.', '/'); public interface Generator<Element> { /** Runs the code generation. */ GeneratedResult generate(Element element, String snippetFileName, CodegenContext context); } }
1
24,284
Is it necessary to keep this class around just to define this constant? Or can we place it somewhere else?
googleapis-gapic-generator
java
@@ -60,6 +60,10 @@ type rule struct { // The parent Policy ID. Used to identify rules belong to a specified // policy for deletion. PolicyUID types.UID + // The metadata of parent Policy. Used to associate the rule with Policy + // for troubleshooting purpose (logging and CLI). + PolicyName string + PolicyNamespace string } // hashRule calculates a string based on the rule's content.
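A side effect worth noting about this patch: hashRule in the file below computes a rule's ID as sha1 over json.Marshal of the whole struct, so any new exported field becomes part of the rule's identity. If PolicyName and PolicyNamespace are meant purely as troubleshooting metadata, one option — a sketch of the idea, not necessarily what the project chose — is to clear them in a copy before hashing, so semantically identical rules keep the same ID:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// rule is an abbreviated stand-in for the agent's rule struct.
type rule struct {
	Direction       string
	AppliedToGroups []string
	PolicyUID       string
	// Metadata added by the patch; arguably not part of rule identity.
	PolicyName      string
	PolicyNamespace string
}

// hashRule mirrors the cache's scheme (sha1 over the JSON encoding,
// truncated to 16 hex characters) but clears the metadata first so the
// ID depends only on the semantic fields.
func hashRule(r *rule) string {
	cp := *r
	cp.PolicyName, cp.PolicyNamespace = "", ""
	b, _ := json.Marshal(cp)
	sum := sha1.Sum(b)
	return hex.EncodeToString(sum[:])[:16]
}

func main() {
	a := &rule{Direction: "In", AppliedToGroups: []string{"g1"}, PolicyUID: "uid-1", PolicyName: "np-a"}
	b := &rule{Direction: "In", AppliedToGroups: []string{"g1"}, PolicyUID: "uid-1", PolicyName: "np-b"}
	fmt.Println(hashRule(a) == hashRule(b)) // true: metadata doesn't change the ID
}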
1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package networkpolicy import ( "crypto/sha1" "encoding/hex" "encoding/json" "fmt" "sync" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" "k8s.io/klog" "github.com/vmware-tanzu/antrea/pkg/apis/networking/v1beta1" ) const ( RuleIDLength = 16 appliedToGroupIndex = "appliedToGroup" addressGroupIndex = "addressGroup" policyIndex = "policy" ) // rule is the struct stored in ruleCache, it contains necessary information // to construct a complete rule that can be used by reconciler to enforce. // The K8s NetworkPolicy object doesn't provide ID for its rule, here we // calculate an ID based on the rule's fields. That means: // 1. If a rule's selector/services/direction changes, it becomes "another" rule. // 2. If inserting rules before a rule or shuffling rules in a NetworkPolicy, we // can know the existing rules don't change and skip processing them. type rule struct { // ID is calculated from the hash value of all other fields. ID string // Direction of this rule. Direction v1beta1.Direction // Source Address of this rule, can't coexist with To. From v1beta1.NetworkPolicyPeer // Destination Address of this rule, can't coexist with From. To v1beta1.NetworkPolicyPeer // Protocols and Ports of this rule. Services []v1beta1.Service // Targets of this rule. AppliedToGroups []string // The parent Policy ID. Used to identify rules belong to a specified // policy for deletion. PolicyUID types.UID } // hashRule calculates a string based on the rule's content. func hashRule(r *rule) string { hash := sha1.New() b, _ := json.Marshal(r) hash.Write(b) hashValue := hex.EncodeToString(hash.Sum(nil)) return hashValue[:RuleIDLength] } // CompletedRule contains IPAddresses and Pods flattened from AddressGroups and AppliedToGroups. // It's the struct used by reconciler. type CompletedRule struct { *rule // Source Pods of this rule, can't coexist with ToAddresses. FromAddresses v1beta1.GroupMemberPodSet // Destination Pods of this rule, can't coexist with FromAddresses. ToAddresses v1beta1.GroupMemberPodSet // Target Pods of this rule. Pods v1beta1.GroupMemberPodSet } // String returns the string representation of the CompletedRule. func (r *CompletedRule) String() string { var addressString string if r.Direction == v1beta1.DirectionIn { addressString = fmt.Sprintf("FromAddressGroups: %d, FromIPBlocks: %d, FromAddresses: %d", len(r.From.AddressGroups), len(r.From.IPBlocks), len(r.FromAddresses)) } else { addressString = fmt.Sprintf("ToAddressGroups: %d, ToIPBlocks: %d, ToAddresses: %d", len(r.To.AddressGroups), len(r.To.IPBlocks), len(r.ToAddresses)) } return fmt.Sprintf("%s (Direction: %v, Pods: %d, %s, Services: %d)", r.ID, r.Direction, len(r.Pods), addressString, len(r.Services)) } // ruleCache caches Antrea AddressGroups, AppliedToGroups and NetworkPolicies, // can construct complete rules that can be used by reconciler to enforce. 
type ruleCache struct { podSetLock sync.RWMutex // podSetByGroup stores the AppliedToGroup members. // It is a mapping from group name to a set of Pods. podSetByGroup map[string]v1beta1.GroupMemberPodSet addressSetLock sync.RWMutex // addressSetByGroup stores the AddressGroup members. // It is a mapping from group name to a set of Pods. addressSetByGroup map[string]v1beta1.GroupMemberPodSet policySetLock sync.RWMutex // policySet is a set to store NetworkPolicy UID strings. policySet sets.String // rules is a storage that supports listing rules using multiple indexing functions. // rules is thread-safe. rules cache.Indexer // dirtyRuleHandler is a callback that is run upon finding a rule out-of-sync. dirtyRuleHandler func(string) // podUpdates is a channel for receiving Pod updates from CNIServer. podUpdates <-chan v1beta1.PodReference } // TODO: save namespace information in cache. func (c *ruleCache) GetNetworkPolicies() []v1beta1.NetworkPolicy { var ret []v1beta1.NetworkPolicy c.policySetLock.RLock() defer c.policySetLock.RUnlock() for uid := range c.policySet { np := v1beta1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Name: uid}, } rules, _ := c.rules.ByIndex(policyIndex, uid) for _, ruleObj := range rules { rule := ruleObj.(*rule) np.Rules = append(np.Rules, v1beta1.NetworkPolicyRule{ Direction: rule.Direction, From: rule.From, To: rule.To, Services: rule.Services, }) np.AppliedToGroups = rule.AppliedToGroups } ret = append(ret, np) } return ret } func (c *ruleCache) GetAddressGroups() []v1beta1.AddressGroup { var ret []v1beta1.AddressGroup c.addressSetLock.RLock() defer c.addressSetLock.RUnlock() for k, v := range c.addressSetByGroup { var pods []v1beta1.GroupMemberPod for _, pod := range v { pods = append(pods, *pod) } ret = append(ret, v1beta1.AddressGroup{ ObjectMeta: metav1.ObjectMeta{Name: k}, Pods: pods, }) } return ret } func (c *ruleCache) GetAppliedToGroups() []v1beta1.AppliedToGroup { var ret []v1beta1.AppliedToGroup c.podSetLock.RLock() defer c.podSetLock.RUnlock() for k, v := range c.podSetByGroup { var pods []v1beta1.GroupMemberPod for _, pod := range v.Items() { pods = append(pods, *pod) } ret = append(ret, v1beta1.AppliedToGroup{ ObjectMeta: metav1.ObjectMeta{Name: k}, Pods: pods, }) } return ret } // ruleKeyFunc knows how to get key of a *rule. func ruleKeyFunc(obj interface{}) (string, error) { rule := obj.(*rule) return rule.ID, nil } // addressGroupIndexFunc knows how to get addressGroups of a *rule. // It's provided to cache.Indexer to build an index of addressGroups. func addressGroupIndexFunc(obj interface{}) ([]string, error) { rule := obj.(*rule) addressGroups := make([]string, 0, len(rule.From.AddressGroups)+len(rule.To.AddressGroups)) addressGroups = append(addressGroups, rule.From.AddressGroups...) addressGroups = append(addressGroups, rule.To.AddressGroups...) return addressGroups, nil } // appliedToGroupIndexFunc knows how to get appliedToGroups of a *rule. // It's provided to cache.Indexer to build an index of appliedToGroups. func appliedToGroupIndexFunc(obj interface{}) ([]string, error) { rule := obj.(*rule) return rule.AppliedToGroups, nil } // policyIndexFunc knows how to get NetworkPolicy UID of a *rule. // It's provided to cache.Indexer to build an index of NetworkPolicy. func policyIndexFunc(obj interface{}) ([]string, error) { rule := obj.(*rule) return []string{string(rule.PolicyUID)}, nil } // newRuleCache returns a new *ruleCache. 
func newRuleCache(dirtyRuleHandler func(string), podUpdate <-chan v1beta1.PodReference) *ruleCache { rules := cache.NewIndexer( ruleKeyFunc, cache.Indexers{addressGroupIndex: addressGroupIndexFunc, appliedToGroupIndex: appliedToGroupIndexFunc, policyIndex: policyIndexFunc}, ) cache := &ruleCache{ podSetByGroup: make(map[string]v1beta1.GroupMemberPodSet), addressSetByGroup: make(map[string]v1beta1.GroupMemberPodSet), policySet: sets.NewString(), rules: rules, dirtyRuleHandler: dirtyRuleHandler, podUpdates: podUpdate, } go cache.processPodUpdates() return cache } // processPodUpdates is an infinite loop that takes Pod update events from the // channel, finds out AppliedToGroups that contains this Pod and trigger // reconciling of related rules. // It can enforce NetworkPolicies to newly added Pods right after CNI ADD is // done if antrea-controller has computed the Pods' policies and propagated // them to this Node by their labels and NodeName, instead of waiting for their // IPs are reported to kube-apiserver and processed by antrea-controller. func (c *ruleCache) processPodUpdates() { for { select { case pod := <-c.podUpdates: func() { memberPod := &v1beta1.GroupMemberPod{Pod: &pod} c.podSetLock.RLock() defer c.podSetLock.RUnlock() for group, podSet := range c.podSetByGroup { if podSet.Has(memberPod) { c.onAppliedToGroupUpdate(group) } } }() } } } // GetAddressGroupNum gets the number of AddressGroup. func (c *ruleCache) GetAddressGroupNum() int { c.addressSetLock.RLock() defer c.addressSetLock.RUnlock() return len(c.addressSetByGroup) } // AddAddressGroup adds a new *v1beta1.AddressGroup to the cache. The rules // referencing it will be regarded as dirty. // It's safe to add an AddressGroup multiple times as it only overrides the // map, this could happen when the watcher reconnects to the Apiserver. func (c *ruleCache) AddAddressGroup(group *v1beta1.AddressGroup) error { c.addressSetLock.Lock() defer c.addressSetLock.Unlock() podSet := v1beta1.GroupMemberPodSet{} for i := range group.Pods { // Must not store address of loop iterator variable as it's the same // address taking different values in each loop iteration, otherwise // podSet would eventually contain only the last value. // https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable podSet.Insert(&group.Pods[i]) } c.addressSetByGroup[group.Name] = podSet c.onAddressGroupUpdate(group.Name) return nil } // PatchAddressGroup updates a cached *v1beta1.AddressGroup. // The rules referencing it will be regarded as dirty. func (c *ruleCache) PatchAddressGroup(patch *v1beta1.AddressGroupPatch) error { c.addressSetLock.Lock() defer c.addressSetLock.Unlock() podSet, exists := c.addressSetByGroup[patch.Name] if !exists { return fmt.Errorf("AddressGroup %v doesn't exist in cache, can't be patched", patch.Name) } for i := range patch.AddedPods { podSet.Insert(&patch.AddedPods[i]) } for i := range patch.RemovedPods { podSet.Delete(&patch.RemovedPods[i]) } c.onAddressGroupUpdate(patch.Name) return nil } // DeleteAddressGroup deletes a cached *v1beta1.AddressGroup. // It should only happen when a group is no longer referenced by any rule, so // no need to mark dirty rules. func (c *ruleCache) DeleteAddressGroup(group *v1beta1.AddressGroup) error { c.addressSetLock.Lock() defer c.addressSetLock.Unlock() delete(c.addressSetByGroup, group.Name) return nil } // GetAppliedToGroupNum gets the number of AppliedToGroup. 
func (c *ruleCache) GetAppliedToGroupNum() int { c.podSetLock.RLock() defer c.podSetLock.RUnlock() return len(c.podSetByGroup) } // AddAppliedToGroup adds a new *v1beta1.AppliedToGroup to the cache. The rules // referencing it will be regarded as dirty. // It's safe to add an AppliedToGroup multiple times as it only overrides the // map, this could happen when the watcher reconnects to the Apiserver. func (c *ruleCache) AddAppliedToGroup(group *v1beta1.AppliedToGroup) error { c.podSetLock.Lock() defer c.podSetLock.Unlock() podSet := v1beta1.GroupMemberPodSet{} for i := range group.Pods { podSet.Insert(&group.Pods[i]) } c.podSetByGroup[group.Name] = podSet c.onAppliedToGroupUpdate(group.Name) return nil } // PatchAppliedToGroup updates a cached *v1beta1.AppliedToGroupPatch. // The rules referencing it will be regarded as dirty. func (c *ruleCache) PatchAppliedToGroup(patch *v1beta1.AppliedToGroupPatch) error { c.podSetLock.Lock() defer c.podSetLock.Unlock() podSet, exists := c.podSetByGroup[patch.Name] if !exists { return fmt.Errorf("AppliedToGroup %v doesn't exist in cache, can't be patched", patch.Name) } for i := range patch.AddedPods { podSet.Insert(&patch.AddedPods[i]) } for i := range patch.RemovedPods { podSet.Delete(&patch.RemovedPods[i]) } c.onAppliedToGroupUpdate(patch.Name) return nil } // DeleteAppliedToGroup deletes a cached *v1beta1.AppliedToGroup. // It should only happen when a group is no longer referenced by any rule, so // no need to mark dirty rules. func (c *ruleCache) DeleteAppliedToGroup(group *v1beta1.AppliedToGroup) error { c.podSetLock.Lock() defer c.podSetLock.Unlock() delete(c.podSetByGroup, group.Name) return nil } // toRule converts v1beta1.NetworkPolicyRule to *rule. func toRule(r *v1beta1.NetworkPolicyRule, policy *v1beta1.NetworkPolicy) *rule { rule := &rule{ Direction: r.Direction, From: r.From, To: r.To, Services: r.Services, AppliedToGroups: policy.AppliedToGroups, PolicyUID: policy.UID, } rule.ID = hashRule(rule) return rule } // GetNetworkPolicyNum gets the number of NetworkPolicy. func (c *ruleCache) GetNetworkPolicyNum() int { c.policySetLock.RLock() defer c.policySetLock.RUnlock() return c.policySet.Len() } // AddNetworkPolicy adds a new *v1beta1.NetworkPolicy to the cache. // It could happen that an existing NetworkPolicy is "added" again when the // watcher reconnects to the Apiserver, we use the same processing as // UpdateNetworkPolicy to ensure orphan rules are removed. func (c *ruleCache) AddNetworkPolicy(policy *v1beta1.NetworkPolicy) error { c.policySetLock.Lock() defer c.policySetLock.Unlock() c.policySet.Insert(string(policy.UID)) return c.UpdateNetworkPolicy(policy) } // UpdateNetworkPolicy updates a cached *v1beta1.NetworkPolicy. // The added rules and removed rules will be regarded as dirty. func (c *ruleCache) UpdateNetworkPolicy(policy *v1beta1.NetworkPolicy) error { existingRules, _ := c.rules.ByIndex(policyIndex, string(policy.UID)) ruleByID := map[string]interface{}{} for _, r := range existingRules { ruleByID[r.(*rule).ID] = r } for i := range policy.Rules { rule := toRule(&policy.Rules[i], policy) if _, exists := ruleByID[rule.ID]; exists { // If rule already exists, remove it from the map so the ones left finally are orphaned. klog.V(2).Infof("Rule %v was not changed", rule.ID) delete(ruleByID, rule.ID) } else { // If rule doesn't exist, add it to cache, mark it as dirty. c.rules.Add(rule) c.dirtyRuleHandler(rule.ID) } } // At this moment, the remaining rules are orphaned, remove them from store and mark them as dirty. 
for ruleID, rule := range ruleByID { c.rules.Delete(rule) c.dirtyRuleHandler(ruleID) } return nil } // DeleteNetworkPolicy deletes a cached *v1beta1.NetworkPolicy. // All its rules will be regarded as dirty. func (c *ruleCache) DeleteNetworkPolicy(policy *v1beta1.NetworkPolicy) error { c.policySetLock.Lock() defer c.policySetLock.Unlock() c.policySet.Delete(string(policy.UID)) existingRules, _ := c.rules.ByIndex(policyIndex, string(policy.UID)) for _, r := range existingRules { ruleID := r.(*rule).ID c.rules.Delete(r) c.dirtyRuleHandler(ruleID) } return nil } // GetCompletedRule constructs a *CompletedRule for the provided ruleID. // If the rule is not found or not completed due to missing group data, // the return value will indicate it. func (c *ruleCache) GetCompletedRule(ruleID string) (completedRule *CompletedRule, exists bool, completed bool) { obj, exists, _ := c.rules.GetByKey(ruleID) if !exists { return nil, false, false } r := obj.(*rule) var fromAddresses, toAddresses v1beta1.GroupMemberPodSet if r.Direction == v1beta1.DirectionIn { fromAddresses, completed = c.unionAddressGroups(r.From.AddressGroups) } else { toAddresses, completed = c.unionAddressGroups(r.To.AddressGroups) } if !completed { return nil, true, false } pods, completed := c.unionAppliedToGroups(r.AppliedToGroups) if !completed { return nil, true, false } completedRule = &CompletedRule{ rule: r, FromAddresses: fromAddresses, ToAddresses: toAddresses, Pods: pods, } return completedRule, true, true } // onAppliedToGroupUpdate gets rules referencing to the provided AppliedToGroup // and mark them as dirty. func (c *ruleCache) onAppliedToGroupUpdate(groupName string) { ruleIDs, _ := c.rules.IndexKeys(appliedToGroupIndex, groupName) for _, ruleID := range ruleIDs { c.dirtyRuleHandler(ruleID) } } // onAddressGroupUpdate gets rules referencing to the provided AddressGroup // and mark them as dirty. func (c *ruleCache) onAddressGroupUpdate(groupName string) { ruleIDs, _ := c.rules.IndexKeys(addressGroupIndex, groupName) for _, ruleID := range ruleIDs { c.dirtyRuleHandler(ruleID) } } // unionAddressGroups gets the union of addresses of the provided address groups. // If any group is not found, nil and false will be returned to indicate the // set is not complete yet. func (c *ruleCache) unionAddressGroups(groupNames []string) (v1beta1.GroupMemberPodSet, bool) { c.addressSetLock.RLock() defer c.addressSetLock.RUnlock() set := v1beta1.NewGroupMemberPodSet() for _, groupName := range groupNames { curSet, exists := c.addressSetByGroup[groupName] if !exists { klog.V(2).Infof("AddressGroup %v was not found", groupName) return nil, false } set = set.Union(curSet) } return set, true } // unionAppliedToGroups gets the union of pods of the provided appliedTo groups. // If any group is not found, nil and false will be returned to indicate the // set is not complete yet. func (c *ruleCache) unionAppliedToGroups(groupNames []string) (v1beta1.GroupMemberPodSet, bool) { c.podSetLock.RLock() defer c.podSetLock.RUnlock() set := v1beta1.NewGroupMemberPodSet() for _, groupName := range groupNames { curSet, exists := c.podSetByGroup[groupName] if !exists { klog.V(2).Infof("AppliedToGroup %v was not found", groupName) return nil, false } set = set.Union(curSet) } return set, true }
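The AddAddressGroup comment above links to the well-known Go gotcha about taking the address of a loop iterator variable. A minimal standalone sketch of that pitfall and of the indexing fix the code uses (Pod here is a hypothetical stand-in for v1beta1.GroupMemberPod, not the actual Antrea type):

package main

import "fmt"

// Pod is a hypothetical stand-in for v1beta1.GroupMemberPod.
type Pod struct{ Name string }

func main() {
	pods := []Pod{{"a"}, {"b"}, {"c"}}

	// Pitfall (pre-Go 1.22 semantics): the loop variable p is a single
	// location reused on every iteration, so every appended pointer
	// aliases it and the slice ends up with three copies of the final
	// element. Go 1.22 gives each iteration a fresh p, which silently
	// changes this behavior.
	var wrong []*Pod
	for _, p := range pods {
		wrong = append(wrong, &p)
	}

	// What AddAddressGroup does instead: index into the slice so each
	// pointer refers to distinct, stable backing storage.
	var right []*Pod
	for i := range pods {
		right = append(right, &pods[i])
	}

	fmt.Println(*wrong[0], *wrong[1], *wrong[2]) // {c} {c} {c} before Go 1.22
	fmt.Println(*right[0], *right[1], *right[2]) // {a} {b} {c}
}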
1
14,452
Question - do we cache the NetworkPolicy itself? If so, we could point to the NetworkPolicy here instead of storing only its UID.
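If the answer is yes, one way to realize the suggestion - purely an illustrative sketch with simplified stand-in types, not the actual Antrea structures - is to cache policies by UID and give each rule a back-pointer:

package main

import (
	"fmt"
	"sync"
)

// NetworkPolicy and rule are simplified stand-ins for the real
// v1beta1 types (assumptions for illustration only).
type NetworkPolicy struct {
	UID  string
	Name string
}

type rule struct {
	ID     string
	Policy *NetworkPolicy // back-pointer instead of a bare PolicyUID
}

// policyCache keeps the NetworkPolicy objects themselves, so rules (and
// any status/debug path) can resolve policy metadata without a second
// lookup structure.
type policyCache struct {
	sync.RWMutex
	policies map[string]*NetworkPolicy // keyed by UID
}

func (c *policyCache) Add(p *NetworkPolicy) {
	c.Lock()
	defer c.Unlock()
	c.policies[p.UID] = p
}

func (c *policyCache) Get(uid string) (*NetworkPolicy, bool) {
	c.RLock()
	defer c.RUnlock()
	p, ok := c.policies[uid]
	return p, ok
}

func main() {
	pc := &policyCache{policies: map[string]*NetworkPolicy{}}
	np := &NetworkPolicy{UID: "uid-1", Name: "allow-web"}
	pc.Add(np)

	r := rule{ID: "r1", Policy: np}
	if p, ok := pc.Get("uid-1"); ok {
		fmt.Println(r.ID, "belongs to", p.Name)
	}
}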
antrea-io-antrea
go
@@ -11,7 +11,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog"

 	beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
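The import removed here is client-go's rate-limiting workqueue, which the old file below drains in handleMessage via the Get/Done pair. For readers unfamiliar with that contract, a minimal sketch (assumes k8s.io/client-go is on the module path; NewRateLimitingQueue exists in the client-go versions contemporary with this code, though newer releases deprecate it in favor of typed queues):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A rate-limiting queue deduplicates keys and backs off on retries.
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	q.Add("node-1/message-42")

	// Consumer loop contract: Get blocks until an item is available (or
	// the queue shuts down), and Done must be called afterwards so the
	// same key can be re-added later.
	key, shutdown := q.Get()
	if shutdown {
		return
	}
	fmt.Println("processing", key)
	q.Done(key)

	q.ShutDown()
}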
1
package handler import ( "encoding/json" "fmt" "regexp" "strings" "sync" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog" beehiveContext "github.com/kubeedge/beehive/pkg/core/context" beehiveModel "github.com/kubeedge/beehive/pkg/core/model" "github.com/kubeedge/kubeedge/cloud/pkg/apis/reliablesyncs/v1alpha1" "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/channelq" hubio "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/io" "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model" hubconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config" deviceconst "github.com/kubeedge/kubeedge/cloud/pkg/devicecontroller/constants" edgeconst "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/constants" edgemessagelayer "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/messagelayer" "github.com/kubeedge/kubeedge/cloud/pkg/synccontroller" "github.com/kubeedge/kubeedge/common/constants" "github.com/kubeedge/viaduct/pkg/conn" "github.com/kubeedge/viaduct/pkg/mux" ) // ExitCode exit code type ExitCode int const ( hubioReadFail ExitCode = iota hubioWriteFail messageQueueDisconnect nodeStop nodeDisconnect ) // constants for error message const ( MsgFormatError = "message format not correct" VolumePattern = `^\w[-\w.+]*/` + constants.CSIResourceTypeVolume + `/\w[-\w.+]*` ) // VolumeRegExp is used to validate the volume resource var VolumeRegExp = regexp.MustCompile(VolumePattern) // MessageHandle processes messages between cloud and edge type MessageHandle struct { KeepaliveInterval int WriteTimeout int Nodes sync.Map nodeConns sync.Map nodeLocks sync.Map MessageQueue *channelq.ChannelMessageQueue Handlers []HandleFunc NodeLimit int KeepaliveChannel map[string]chan struct{} MessageAcks sync.Map } type HandleFunc func(hi hubio.CloudHubIO, info *model.HubInfo, exitServe chan ExitCode, stopSendMsg chan struct{}) var once sync.Once // CloudhubHandler the shared handler for both websocket and quic servers var CloudhubHandler *MessageHandle // InitHandler create a handler for websocket and quic servers func InitHandler(eventq *channelq.ChannelMessageQueue) { once.Do(func() { CloudhubHandler = &MessageHandle{ KeepaliveInterval: int(hubconfig.Config.KeepaliveInterval), WriteTimeout: int(hubconfig.Config.WriteTimeout), MessageQueue: eventq, NodeLimit: int(hubconfig.Config.NodeLimit), } CloudhubHandler.KeepaliveChannel = make(map[string]chan struct{}) CloudhubHandler.Handlers = []HandleFunc{ CloudhubHandler.KeepaliveCheckLoop, CloudhubHandler.MessageWriteLoop, CloudhubHandler.ListMessageWriteLoop, } CloudhubHandler.initServerEntries() }) } // initServerEntries register handler func func (mh *MessageHandle) initServerEntries() { mux.Entry(mux.NewPattern("*").Op("*"), mh.HandleServer) } // HandleServer handle all the request from node func (mh *MessageHandle) HandleServer(container *mux.MessageContainer, writer mux.ResponseWriter) { nodeID := container.Header.Get("node_id") projectID := container.Header.Get("project_id") if mh.GetNodeCount() >= mh.NodeLimit { klog.Errorf("Fail to serve node %s, reach node limit", nodeID) return } if container.Message.GetOperation() == model.OpKeepalive { klog.Infof("Keepalive message received from node: %s", nodeID) mh.KeepaliveChannel[nodeID] <- struct{}{} return } // handle the response from edge if VolumeRegExp.MatchString(container.Message.GetResource()) { beehiveContext.SendResp(*container.Message) return } // handle the ack 
message from edge if container.Message.Router.Operation == beehiveModel.ResponseOperation { if ackChan, ok := mh.MessageAcks.Load(container.Message.Header.ParentID); ok { close(ackChan.(chan struct{})) mh.MessageAcks.Delete(container.Message.Header.ParentID) } return } err := mh.PubToController(&model.HubInfo{ProjectID: projectID, NodeID: nodeID}, container.Message) if err != nil { // if err, we should stop node, write data to edgehub, stop nodify klog.Errorf("Failed to serve handle with error: %s", err.Error()) } } // OnRegister register node on first connection func (mh *MessageHandle) OnRegister(connection conn.Connection) { nodeID := connection.ConnectionState().Headers.Get("node_id") projectID := connection.ConnectionState().Headers.Get("project_id") if _, ok := mh.KeepaliveChannel[nodeID]; !ok { mh.KeepaliveChannel[nodeID] = make(chan struct{}, 1) } io := &hubio.JSONIO{Connection: connection} go mh.ServeConn(io, &model.HubInfo{ProjectID: projectID, NodeID: nodeID}) } // KeepaliveCheckLoop checks whether the edge node is still alive func (mh *MessageHandle) KeepaliveCheckLoop(hi hubio.CloudHubIO, info *model.HubInfo, stopServe chan ExitCode, stopSendMsg chan struct{}) { keepaliveTicker := time.NewTimer(time.Duration(mh.KeepaliveInterval) * time.Second) for { select { case _, ok := <-mh.KeepaliveChannel[info.NodeID]: if !ok { return } klog.Infof("Node %s is still alive", info.NodeID) keepaliveTicker.Reset(time.Duration(mh.KeepaliveInterval) * time.Second) case <-keepaliveTicker.C: klog.Warningf("Timeout to receive heart beat from edge node %s for project %s", info.NodeID, info.ProjectID) stopServe <- nodeDisconnect close(stopSendMsg) return } } } func dumpMessageMetadata(msg *beehiveModel.Message) string { return fmt.Sprintf("id: %s, parent_id: %s, group: %s, source: %s, resource: %s, operation: %s", msg.Header.ID, msg.Header.ParentID, msg.Router.Group, msg.Router.Source, msg.Router.Resource, msg.Router.Operation) } func trimMessage(msg *beehiveModel.Message) { resource := msg.GetResource() if strings.HasPrefix(resource, model.ResNode) { tokens := strings.Split(resource, "/") if len(tokens) < 3 { klog.Warningf("event resource %s starts with node but length less than 3", resource) } else { msg.SetResourceOperation(strings.Join(tokens[2:], "/"), msg.GetOperation()) } } } func notifyEventQueueError(hi hubio.CloudHubIO, code ExitCode, nodeID string) { if code == messageQueueDisconnect { msg := beehiveModel.NewMessage("").BuildRouter(model.GpResource, model.SrcCloudHub, model.NewResource(model.ResNode, nodeID, nil), model.OpDisConnect) err := hi.WriteData(msg) if err != nil { klog.Errorf("fail to notify node %s event queue disconnected, reason: %s", nodeID, err.Error()) } } } func constructConnectMessage(info *model.HubInfo, isConnected bool) *beehiveModel.Message { connected := model.OpConnect if !isConnected { connected = model.OpDisConnect } body := map[string]interface{}{ "event_type": connected, "timestamp": time.Now().Unix(), "client_id": info.NodeID} content, _ := json.Marshal(body) msg := beehiveModel.NewMessage("") msg.BuildRouter(model.SrcCloudHub, model.GpResource, model.NewResource(model.ResNode, info.NodeID, nil), connected) msg.FillBody(content) return msg } func (mh *MessageHandle) PubToController(info *model.HubInfo, msg *beehiveModel.Message) error { msg.SetResourceOperation(fmt.Sprintf("node/%s/%s", info.NodeID, msg.GetResource()), msg.GetOperation()) klog.Infof("event received for node %s %s, content: %s", info.NodeID, dumpMessageMetadata(msg), msg.Content) if 
model.IsFromEdge(msg) { err := mh.MessageQueue.Publish(msg) if err != nil { // content is not logged since it may contain sensitive information klog.Errorf("fail to publish event for node %s, %s, reason: %s", info.NodeID, dumpMessageMetadata(msg), err.Error()) return err } } return nil } func (mh *MessageHandle) hubIoWrite(hi hubio.CloudHubIO, nodeID string, msg *beehiveModel.Message) error { value, ok := mh.nodeLocks.Load(nodeID) if !ok { return fmt.Errorf("node disconnected") } mutex := value.(*sync.Mutex) mutex.Lock() defer mutex.Unlock() return hi.WriteData(msg) } // ServeConn starts serving the incoming connection func (mh *MessageHandle) ServeConn(hi hubio.CloudHubIO, info *model.HubInfo) { err := mh.RegisterNode(hi, info) if err != nil { klog.Errorf("fail to register node %s, reason %s", info.NodeID, err.Error()) return } klog.Infof("edge node %s for project %s connected", info.NodeID, info.ProjectID) exitServe := make(chan ExitCode, 3) stopSendMsg := make(chan struct{}) for _, handle := range mh.Handlers { go handle(hi, info, exitServe, stopSendMsg) } code := <-exitServe mh.UnregisterNode(hi, info, code) } // RegisterNode register node in cloudhub for the incoming connection func (mh *MessageHandle) RegisterNode(hi hubio.CloudHubIO, info *model.HubInfo) error { mh.MessageQueue.Connect(info) err := mh.MessageQueue.Publish(constructConnectMessage(info, true)) if err != nil { klog.Errorf("fail to publish node connect event for node %s, reason %s", info.NodeID, err.Error()) notifyEventQueueError(hi, messageQueueDisconnect, info.NodeID) err = hi.Close() if err != nil { klog.Errorf("fail to close connection, reason: %s", err.Error()) } return err } mh.nodeConns.Store(info.NodeID, hi) mh.nodeLocks.Store(info.NodeID, &sync.Mutex{}) mh.Nodes.Store(info.NodeID, true) return nil } // UnregisterNode unregister node in cloudhub func (mh *MessageHandle) UnregisterNode(hi hubio.CloudHubIO, info *model.HubInfo, code ExitCode) { mh.nodeLocks.Delete(info.NodeID) mh.nodeConns.Delete(info.NodeID) close(mh.KeepaliveChannel[info.NodeID]) delete(mh.KeepaliveChannel, info.NodeID) err := mh.MessageQueue.Publish(constructConnectMessage(info, false)) if err != nil { klog.Errorf("fail to publish node disconnect event for node %s, reason %s", info.NodeID, err.Error()) } notifyEventQueueError(hi, code, info.NodeID) mh.Nodes.Delete(info.NodeID) err = hi.Close() if err != nil { klog.Errorf("fail to close connection, reason: %s", err.Error()) } // delete the nodeQueue and nodeStore when node stopped if code == nodeStop { mh.MessageQueue.Close(info) } } // GetNodeCount returns the number of connected Nodes func (mh *MessageHandle) GetNodeCount() int { var num int iter := func(key, value interface{}) bool { num++ return true } mh.Nodes.Range(iter) return num } // ListMessageWriteLoop processes all list type resource write requests func (mh *MessageHandle) ListMessageWriteLoop(hi hubio.CloudHubIO, info *model.HubInfo, stopServe chan ExitCode, stopSendMsg chan struct{}) { nodeListQueue, err := mh.MessageQueue.GetNodeListQueue(info.NodeID) if err != nil { klog.Errorf("Failed to get nodeQueue for node %s: %v", info.NodeID, err) stopServe <- messageQueueDisconnect return } nodeListStore, err := mh.MessageQueue.GetNodeListStore(info.NodeID) if err != nil { klog.Errorf("Failed to get nodeStore for node %s: %v", info.NodeID, err) stopServe <- messageQueueDisconnect return } for { select { case <-stopSendMsg: klog.Errorf("Node %s disconnected and stopped sending messages", info.NodeID) return default: 
mh.handleMessage(nodeListQueue, nodeListStore, hi, info, stopServe, "listMessage") } } } // MessageWriteLoop processes all write requests func (mh *MessageHandle) MessageWriteLoop(hi hubio.CloudHubIO, info *model.HubInfo, stopServe chan ExitCode, stopSendMsg chan struct{}) { nodeQueue, err := mh.MessageQueue.GetNodeQueue(info.NodeID) if err != nil { klog.Errorf("Failed to get nodeQueue for node %s: %v", info.NodeID, err) stopServe <- messageQueueDisconnect return } nodeStore, err := mh.MessageQueue.GetNodeStore(info.NodeID) if err != nil { klog.Errorf("Failed to get nodeStore for node %s: %v", info.NodeID, err) stopServe <- messageQueueDisconnect return } for { select { case <-stopSendMsg: klog.Errorf("Node %s disconnected and stopped sending messages", info.NodeID) return default: mh.handleMessage(nodeQueue, nodeStore, hi, info, stopServe, "message") } } } func (mh *MessageHandle) handleMessage(nodeQueue workqueue.RateLimitingInterface, nodeStore cache.Store, hi hubio.CloudHubIO, info *model.HubInfo, stopServe chan ExitCode, msgType string) { key, quit := nodeQueue.Get() if quit { klog.Errorf("nodeQueue for node %s has shutdown", info.NodeID) return } obj, exist, _ := nodeStore.GetByKey(key.(string)) if !exist { klog.Errorf("nodeStore for node %s doesn't exist", info.NodeID) return } msg := obj.(*beehiveModel.Message) if model.IsNodeStopped(msg) { klog.Infof("node %s is stopped, will disconnect", info.NodeID) stopServe <- nodeStop return } if !model.IsToEdge(msg) { klog.Infof("skip only to cloud event for node %s, %s, content %s", info.NodeID, dumpMessageMetadata(msg), msg.Content) return } klog.V(4).Infof("event to send for node %s, %s, content %s", info.NodeID, dumpMessageMetadata(msg), msg.Content) copyMsg := deepcopy(msg) trimMessage(msg) err := hi.SetWriteDeadline(time.Now().Add(time.Duration(mh.WriteTimeout) * time.Second)) if err != nil { klog.Errorf("SetWriteDeadline error, %s", err.Error()) stopServe <- hubioWriteFail return } if msgType == "listMessage" { mh.send(hi, info, msg) // delete successfully sent events from the queue/store nodeStore.Delete(msg) } else { mh.sendMsg(hi, info, msg, copyMsg, nodeStore) } nodeQueue.Done(key) } func (mh *MessageHandle) sendMsg(hi hubio.CloudHubIO, info *model.HubInfo, msg, copyMsg *beehiveModel.Message, nodeStore cache.Store) { ackChan := make(chan struct{}) mh.MessageAcks.Store(msg.GetID(), ackChan) // initialize timer and retry count for sending message var ( retry = 0 retryInterval time.Duration = 5 ) ticker := time.NewTimer(retryInterval * time.Second) mh.send(hi, info, msg) LOOP: for { select { case <-ackChan: mh.saveSuccessPoint(copyMsg, info, nodeStore) break LOOP case <-ticker.C: if retry == 4 { break LOOP } mh.send(hi, info, msg) retry++ ticker.Reset(time.Second * retryInterval) } } } func (mh *MessageHandle) send(hi hubio.CloudHubIO, info *model.HubInfo, msg *beehiveModel.Message) { err := mh.hubIoWrite(hi, info.NodeID, msg) if err != nil { errStr := "write error, connection for node %s " errReason := "will be closed, affected event %s, reason %s" if err.Error() == "node disconnected" { errReason = "has been closed, affected event %s, reason %s " } errStr = errStr + errReason klog.Errorf(errStr, info.NodeID, dumpMessageMetadata(msg), err.Error()) return } } func (mh *MessageHandle) saveSuccessPoint(msg *beehiveModel.Message, info *model.HubInfo, nodeStore cache.Store) { if msg.GetGroup() == edgeconst.GroupResource { resourceNamespace, _ := edgemessagelayer.GetNamespace(*msg) resourceName, _ := edgemessagelayer.GetResourceName(*msg) 
resourceType, _ := edgemessagelayer.GetResourceType(*msg) resourceUID, err := channelq.GetMessageUID(*msg) if err != nil { return } objectSyncName := synccontroller.BuildObjectSyncName(info.NodeID, resourceUID) if msg.GetOperation() == beehiveModel.DeleteOperation { nodeStore.Delete(msg) mh.deleteSuccessPoint(resourceNamespace, objectSyncName) return } objectSync, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Get(objectSyncName, metav1.GetOptions{}) if err == nil { objectSync.Status.ObjectResourceVersion = msg.GetResourceVersion() _, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).UpdateStatus(objectSync) if err != nil { klog.Errorf("Failed to update objectSync: %v, resourceType: %s, resourceNamespace: %s, resourceName: %s", err, resourceType, resourceNamespace, resourceName) } } else if err != nil && apierrors.IsNotFound(err) { objectSync := &v1alpha1.ObjectSync{ ObjectMeta: metav1.ObjectMeta{ Name: objectSyncName, }, Spec: v1alpha1.ObjectSyncSpec{ ObjectAPIVersion: "", ObjectKind: resourceType, ObjectName: resourceName, }, } _, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Create(objectSync) if err != nil { klog.Errorf("Failed to create objectSync: %s, err: %v", objectSyncName, err) return } objectSyncStatus, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Get(objectSyncName, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed to get objectSync: %s, err: %v", objectSyncName, err) } objectSyncStatus.Status.ObjectResourceVersion = msg.GetResourceVersion() mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).UpdateStatus(objectSyncStatus) } } // TODO: save device info if msg.GetGroup() == deviceconst.GroupTwin { } klog.Infof("saveSuccessPoint successfully for message: %s", msg.GetResource()) } func (mh *MessageHandle) deleteSuccessPoint(resourceNamespace, objectSyncName string) { mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Delete(objectSyncName, metav1.NewDeleteOptions(0)) } func deepcopy(msg *beehiveModel.Message) *beehiveModel.Message { if msg == nil { return nil } out := new(beehiveModel.Message) out.Header = msg.Header out.Router = msg.Router out.Content = msg.Content return out }
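The sendMsg flow above boils down to: register an ack channel keyed by message ID, send, resend on a timer, and stop once HandleServer closes the channel (or after five attempts). A distilled, self-contained version of that pattern - simplified signatures, not KubeEdge code:

package main

import (
	"fmt"
	"time"
)

// sendWithAck retries send() until ack is closed or maxRetries resends
// have been made, mirroring the ticker/ackChan loop in sendMsg above.
func sendWithAck(send func(), ack <-chan struct{}, interval time.Duration, maxRetries int) bool {
	timer := time.NewTimer(interval)
	defer timer.Stop()

	send()
	for retry := 0; ; {
		select {
		case <-ack:
			return true // receiver confirmed; caller would persist the sync point (cf. saveSuccessPoint)
		case <-timer.C:
			if retry == maxRetries {
				return false
			}
			send()
			retry++
			timer.Reset(interval)
		}
	}
}

func main() {
	ack := make(chan struct{})
	go func() {
		time.Sleep(30 * time.Millisecond)
		close(ack) // simulates the edge node's ResponseOperation message
	}()

	ok := sendWithAck(func() { fmt.Println("send") }, ack, 20*time.Millisecond, 4)
	fmt.Println("acked:", ok)
}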
1
16,425
In my view, `Register` means the process of **inserting the node resource into etcd through the api-server**, which is invoked upstream rather than here. How about renaming the func to `OnConnected`?
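To make the suggestion concrete, the rename could look like the following diff, in the same style as the patches above (hypothetical, not a merged change):

-// OnRegister register node on first connection
-func (mh *MessageHandle) OnRegister(connection conn.Connection) {
+// OnConnected handles a node's transport connection being established;
+// node *registration* (writing the node resource to etcd via the
+// api-server) happens upstream, not here.
+func (mh *MessageHandle) OnConnected(connection conn.Connection) {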
kubeedge-kubeedge
go
@@ -865,7 +865,8 @@ public abstract class ProcessEngineConfigurationImpl extends ProcessEngineConfig

   // telemetry ///////////////////////////////////////////////////////

-  protected boolean telemetryEnabled = false;
+  /** if set to true the telemetry will be enabled from the first engine start*/
+  protected boolean telemetryInitialized = false;

   /** The endpoint which telemetry is sent to */
   protected String telemetryEndpoint = "https://api.telemetry.camunda.cloud/pings";
1
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.engine.impl.cfg; import static org.camunda.bpm.engine.impl.cmd.HistoryCleanupCmd.MAX_THREADS_NUMBER; import static org.camunda.bpm.engine.impl.util.EnsureUtil.ensureNotNull; import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; import java.nio.charset.Charset; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.SQLException; import java.text.ParseException; import java.util.ArrayList; import java.util.Calendar; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; import javax.naming.InitialContext; import javax.sql.DataSource; import org.apache.ibatis.builder.xml.XMLConfigBuilder; import org.apache.ibatis.datasource.pooled.PooledDataSource; import org.apache.ibatis.mapping.Environment; import org.apache.ibatis.session.Configuration; import org.apache.ibatis.session.ExecutorType; import org.apache.ibatis.session.SqlSessionFactory; import org.apache.ibatis.session.defaults.DefaultSqlSessionFactory; import org.apache.ibatis.transaction.TransactionFactory; import org.apache.ibatis.transaction.jdbc.JdbcTransactionFactory; import org.apache.ibatis.transaction.managed.ManagedTransactionFactory; import org.camunda.bpm.dmn.engine.DmnEngine; import org.camunda.bpm.dmn.engine.DmnEngineConfiguration; import org.camunda.bpm.dmn.engine.impl.DefaultDmnEngineConfiguration; import org.camunda.bpm.dmn.feel.impl.scala.function.FeelCustomFunctionProvider; import org.camunda.bpm.engine.ArtifactFactory; import org.camunda.bpm.engine.AuthorizationService; import org.camunda.bpm.engine.CaseService; import org.camunda.bpm.engine.DecisionService; import org.camunda.bpm.engine.ExternalTaskService; import org.camunda.bpm.engine.FilterService; import org.camunda.bpm.engine.FormService; import org.camunda.bpm.engine.HistoryService; import org.camunda.bpm.engine.IdentityService; import org.camunda.bpm.engine.ManagementService; import org.camunda.bpm.engine.ProcessEngine; import org.camunda.bpm.engine.ProcessEngineConfiguration; import org.camunda.bpm.engine.ProcessEngineException; import org.camunda.bpm.engine.RepositoryService; import org.camunda.bpm.engine.RuntimeService; import org.camunda.bpm.engine.TaskService; import org.camunda.bpm.engine.authorization.Groups; import org.camunda.bpm.engine.authorization.Permission; import org.camunda.bpm.engine.authorization.Permissions; import org.camunda.bpm.engine.impl.AuthorizationServiceImpl; import org.camunda.bpm.engine.impl.DecisionServiceImpl; import 
org.camunda.bpm.engine.impl.DefaultArtifactFactory; import org.camunda.bpm.engine.impl.ExternalTaskServiceImpl; import org.camunda.bpm.engine.impl.FilterServiceImpl; import org.camunda.bpm.engine.impl.FormServiceImpl; import org.camunda.bpm.engine.impl.HistoryServiceImpl; import org.camunda.bpm.engine.impl.IdentityServiceImpl; import org.camunda.bpm.engine.impl.ManagementServiceImpl; import org.camunda.bpm.engine.impl.ModificationBatchJobHandler; import org.camunda.bpm.engine.impl.OptimizeService; import org.camunda.bpm.engine.impl.PriorityProvider; import org.camunda.bpm.engine.impl.ProcessEngineImpl; import org.camunda.bpm.engine.impl.RepositoryServiceImpl; import org.camunda.bpm.engine.impl.RestartProcessInstancesJobHandler; import org.camunda.bpm.engine.impl.RuntimeServiceImpl; import org.camunda.bpm.engine.impl.ServiceImpl; import org.camunda.bpm.engine.impl.TaskServiceImpl; import org.camunda.bpm.engine.impl.application.ProcessApplicationManager; import org.camunda.bpm.engine.impl.batch.removaltime.BatchSetRemovalTimeJobHandler; import org.camunda.bpm.engine.impl.batch.removaltime.DecisionSetRemovalTimeJobHandler; import org.camunda.bpm.engine.impl.batch.removaltime.ProcessSetRemovalTimeJobHandler; import org.camunda.bpm.engine.impl.batch.BatchJobHandler; import org.camunda.bpm.engine.impl.batch.BatchMonitorJobHandler; import org.camunda.bpm.engine.impl.batch.BatchSeedJobHandler; import org.camunda.bpm.engine.impl.batch.deletion.DeleteHistoricProcessInstancesJobHandler; import org.camunda.bpm.engine.impl.batch.deletion.DeleteProcessInstancesJobHandler; import org.camunda.bpm.engine.impl.batch.externaltask.SetExternalTaskRetriesJobHandler; import org.camunda.bpm.engine.impl.batch.job.SetJobRetriesJobHandler; import org.camunda.bpm.engine.impl.batch.update.UpdateProcessInstancesSuspendStateJobHandler; import org.camunda.bpm.engine.impl.bpmn.behavior.ExternalTaskActivityBehavior; import org.camunda.bpm.engine.impl.bpmn.deployer.BpmnDeployer; import org.camunda.bpm.engine.impl.bpmn.parser.BpmnParseListener; import org.camunda.bpm.engine.impl.bpmn.parser.BpmnParser; import org.camunda.bpm.engine.impl.bpmn.parser.DefaultFailedJobParseListener; import org.camunda.bpm.engine.impl.calendar.BusinessCalendarManager; import org.camunda.bpm.engine.impl.calendar.CycleBusinessCalendar; import org.camunda.bpm.engine.impl.calendar.DueDateBusinessCalendar; import org.camunda.bpm.engine.impl.calendar.DurationBusinessCalendar; import org.camunda.bpm.engine.impl.calendar.MapBusinessCalendarManager; import org.camunda.bpm.engine.impl.cfg.auth.AuthorizationCommandChecker; import org.camunda.bpm.engine.impl.cfg.auth.DefaultAuthorizationProvider; import org.camunda.bpm.engine.impl.cfg.auth.DefaultPermissionProvider; import org.camunda.bpm.engine.impl.cfg.auth.PermissionProvider; import org.camunda.bpm.engine.impl.cfg.auth.ResourceAuthorizationProvider; import org.camunda.bpm.engine.impl.cfg.multitenancy.TenantCommandChecker; import org.camunda.bpm.engine.impl.cfg.multitenancy.TenantIdProvider; import org.camunda.bpm.engine.impl.cfg.standalone.StandaloneTransactionContextFactory; import org.camunda.bpm.engine.impl.cmd.HistoryCleanupCmd; import org.camunda.bpm.engine.impl.cmmn.CaseServiceImpl; import org.camunda.bpm.engine.impl.cmmn.deployer.CmmnDeployer; import org.camunda.bpm.engine.impl.cmmn.entity.repository.CaseDefinitionManager; import org.camunda.bpm.engine.impl.cmmn.entity.runtime.CaseExecutionManager; import org.camunda.bpm.engine.impl.cmmn.entity.runtime.CaseSentryPartManager; import 
org.camunda.bpm.engine.impl.cmmn.handler.DefaultCmmnElementHandlerRegistry; import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformFactory; import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformListener; import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformer; import org.camunda.bpm.engine.impl.cmmn.transformer.DefaultCmmnTransformFactory; import org.camunda.bpm.engine.impl.db.DbIdGenerator; import org.camunda.bpm.engine.impl.db.entitymanager.DbEntityManagerFactory; import org.camunda.bpm.engine.impl.db.entitymanager.cache.DbEntityCacheKeyMapping; import org.camunda.bpm.engine.impl.db.sql.DbSqlPersistenceProviderFactory; import org.camunda.bpm.engine.impl.db.sql.DbSqlSessionFactory; import org.camunda.bpm.engine.impl.delegate.DefaultDelegateInterceptor; import org.camunda.bpm.engine.impl.digest.Default16ByteSaltGenerator; import org.camunda.bpm.engine.impl.digest.PasswordEncryptor; import org.camunda.bpm.engine.impl.digest.PasswordManager; import org.camunda.bpm.engine.impl.digest.SaltGenerator; import org.camunda.bpm.engine.impl.digest.Sha512HashDigest; import org.camunda.bpm.engine.impl.dmn.batch.DeleteHistoricDecisionInstancesJobHandler; import org.camunda.bpm.engine.impl.dmn.configuration.DmnEngineConfigurationBuilder; import org.camunda.bpm.engine.impl.dmn.deployer.DecisionDefinitionDeployer; import org.camunda.bpm.engine.impl.dmn.deployer.DecisionRequirementsDefinitionDeployer; import org.camunda.bpm.engine.impl.dmn.entity.repository.DecisionDefinitionManager; import org.camunda.bpm.engine.impl.dmn.entity.repository.DecisionRequirementsDefinitionManager; import org.camunda.bpm.engine.impl.el.CommandContextFunctionMapper; import org.camunda.bpm.engine.impl.el.DateTimeFunctionMapper; import org.camunda.bpm.engine.impl.el.ExpressionManager; import org.camunda.bpm.engine.impl.event.CompensationEventHandler; import org.camunda.bpm.engine.impl.event.ConditionalEventHandler; import org.camunda.bpm.engine.impl.event.EventHandler; import org.camunda.bpm.engine.impl.event.EventHandlerImpl; import org.camunda.bpm.engine.impl.event.EventType; import org.camunda.bpm.engine.impl.event.SignalEventHandler; import org.camunda.bpm.engine.impl.externaltask.DefaultExternalTaskPriorityProvider; import org.camunda.bpm.engine.impl.form.engine.FormEngine; import org.camunda.bpm.engine.impl.form.engine.HtmlFormEngine; import org.camunda.bpm.engine.impl.form.engine.JuelFormEngine; import org.camunda.bpm.engine.impl.form.type.AbstractFormFieldType; import org.camunda.bpm.engine.impl.form.type.BooleanFormType; import org.camunda.bpm.engine.impl.form.type.DateFormType; import org.camunda.bpm.engine.impl.form.type.FormTypes; import org.camunda.bpm.engine.impl.form.type.LongFormType; import org.camunda.bpm.engine.impl.form.type.StringFormType; import org.camunda.bpm.engine.impl.form.validator.FormFieldValidator; import org.camunda.bpm.engine.impl.form.validator.FormValidators; import org.camunda.bpm.engine.impl.form.validator.MaxLengthValidator; import org.camunda.bpm.engine.impl.form.validator.MaxValidator; import org.camunda.bpm.engine.impl.form.validator.MinLengthValidator; import org.camunda.bpm.engine.impl.form.validator.MinValidator; import org.camunda.bpm.engine.impl.form.validator.ReadOnlyValidator; import org.camunda.bpm.engine.impl.form.validator.RequiredValidator; import org.camunda.bpm.engine.impl.history.DefaultHistoryRemovalTimeProvider; import org.camunda.bpm.engine.impl.history.HistoryLevel; import org.camunda.bpm.engine.impl.history.HistoryRemovalTimeProvider; 
import org.camunda.bpm.engine.impl.history.event.HistoricDecisionInstanceManager; import org.camunda.bpm.engine.impl.history.event.HostnameProvider; import org.camunda.bpm.engine.impl.history.handler.CompositeDbHistoryEventHandler; import org.camunda.bpm.engine.impl.history.handler.CompositeHistoryEventHandler; import org.camunda.bpm.engine.impl.history.handler.DbHistoryEventHandler; import org.camunda.bpm.engine.impl.history.handler.HistoryEventHandler; import org.camunda.bpm.engine.impl.history.parser.HistoryParseListener; import org.camunda.bpm.engine.impl.history.producer.CacheAwareCmmnHistoryEventProducer; import org.camunda.bpm.engine.impl.history.producer.CacheAwareHistoryEventProducer; import org.camunda.bpm.engine.impl.history.producer.CmmnHistoryEventProducer; import org.camunda.bpm.engine.impl.history.producer.DefaultDmnHistoryEventProducer; import org.camunda.bpm.engine.impl.history.producer.DmnHistoryEventProducer; import org.camunda.bpm.engine.impl.history.producer.HistoryEventProducer; import org.camunda.bpm.engine.impl.history.transformer.CmmnHistoryTransformListener; import org.camunda.bpm.engine.impl.identity.DefaultPasswordPolicyImpl; import org.camunda.bpm.engine.impl.identity.ReadOnlyIdentityProvider; import org.camunda.bpm.engine.impl.identity.WritableIdentityProvider; import org.camunda.bpm.engine.impl.identity.db.DbIdentityServiceProvider; import org.camunda.bpm.engine.impl.incident.DefaultIncidentHandler; import org.camunda.bpm.engine.impl.incident.IncidentHandler; import org.camunda.bpm.engine.impl.interceptor.CommandContextFactory; import org.camunda.bpm.engine.impl.interceptor.CommandExecutor; import org.camunda.bpm.engine.impl.interceptor.CommandExecutorImpl; import org.camunda.bpm.engine.impl.interceptor.CommandInterceptor; import org.camunda.bpm.engine.impl.interceptor.DelegateInterceptor; import org.camunda.bpm.engine.impl.interceptor.SessionFactory; import org.camunda.bpm.engine.impl.jobexecutor.AsyncContinuationJobHandler; import org.camunda.bpm.engine.impl.jobexecutor.DefaultFailedJobCommandFactory; import org.camunda.bpm.engine.impl.jobexecutor.DefaultJobExecutor; import org.camunda.bpm.engine.impl.jobexecutor.DefaultJobPriorityProvider; import org.camunda.bpm.engine.impl.jobexecutor.FailedJobCommandFactory; import org.camunda.bpm.engine.impl.jobexecutor.JobDeclaration; import org.camunda.bpm.engine.impl.jobexecutor.JobExecutor; import org.camunda.bpm.engine.impl.jobexecutor.JobHandler; import org.camunda.bpm.engine.impl.jobexecutor.NotifyAcquisitionRejectedJobsHandler; import org.camunda.bpm.engine.impl.jobexecutor.ProcessEventJobHandler; import org.camunda.bpm.engine.impl.jobexecutor.RejectedJobsHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerActivateJobDefinitionHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerActivateProcessDefinitionHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerCatchIntermediateEventJobHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerExecuteNestedActivityJobHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerStartEventJobHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerStartEventSubprocessJobHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerSuspendJobDefinitionHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerSuspendProcessDefinitionHandler; import org.camunda.bpm.engine.impl.jobexecutor.TimerTaskListenerJobHandler; import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.BatchWindowManager; import 
org.camunda.bpm.engine.impl.jobexecutor.historycleanup.DefaultBatchWindowManager; import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupBatch; import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupHandler; import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupHelper; import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupJobHandler; import org.camunda.bpm.engine.impl.metrics.MetricsRegistry; import org.camunda.bpm.engine.impl.metrics.MetricsReporterIdProvider; import org.camunda.bpm.engine.impl.history.event.SimpleIpBasedProvider; import org.camunda.bpm.engine.impl.metrics.parser.MetricsBpmnParseListener; import org.camunda.bpm.engine.impl.metrics.parser.MetricsCmmnTransformListener; import org.camunda.bpm.engine.impl.metrics.reporter.DbMetricsReporter; import org.camunda.bpm.engine.impl.migration.DefaultMigrationActivityMatcher; import org.camunda.bpm.engine.impl.migration.DefaultMigrationInstructionGenerator; import org.camunda.bpm.engine.impl.migration.MigrationActivityMatcher; import org.camunda.bpm.engine.impl.migration.MigrationInstructionGenerator; import org.camunda.bpm.engine.impl.migration.batch.MigrationBatchJobHandler; import org.camunda.bpm.engine.impl.migration.validation.activity.MigrationActivityValidator; import org.camunda.bpm.engine.impl.migration.validation.activity.NoCompensationHandlerActivityValidator; import org.camunda.bpm.engine.impl.migration.validation.activity.SupportedActivityValidator; import org.camunda.bpm.engine.impl.migration.validation.activity.SupportedPassiveEventTriggerActivityValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncAfterMigrationValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncMigrationValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncProcessStartMigrationValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingActivityInstanceValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingCompensationInstanceValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingTransitionInstanceValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.NoUnmappedCompensationStartEventValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.NoUnmappedLeafInstanceValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.SupportedActivityInstanceValidator; import org.camunda.bpm.engine.impl.migration.validation.instance.VariableConflictActivityInstanceValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.AdditionalFlowScopeInstructionValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotAddMultiInstanceBodyValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotAddMultiInstanceInnerActivityValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotRemoveMultiInstanceInnerActivityValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.ConditionalEventUpdateEventTriggerValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.GatewayMappingValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.MigrationInstructionValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.OnlyOnceMappedActivityInstructionValidator; import 
org.camunda.bpm.engine.impl.migration.validation.instruction.SameBehaviorInstructionValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.SameEventScopeInstructionValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.SameEventTypeValidator; import org.camunda.bpm.engine.impl.migration.validation.instruction.UpdateEventTriggersValidator; import org.camunda.bpm.engine.impl.optimize.OptimizeManager; import org.camunda.bpm.engine.impl.persistence.GenericManagerFactory; import org.camunda.bpm.engine.impl.persistence.deploy.Deployer; import org.camunda.bpm.engine.impl.persistence.deploy.cache.CacheFactory; import org.camunda.bpm.engine.impl.persistence.deploy.cache.DefaultCacheFactory; import org.camunda.bpm.engine.impl.persistence.deploy.cache.DeploymentCache; import org.camunda.bpm.engine.impl.persistence.entity.AttachmentManager; import org.camunda.bpm.engine.impl.persistence.entity.AuthorizationManager; import org.camunda.bpm.engine.impl.persistence.entity.BatchManager; import org.camunda.bpm.engine.impl.persistence.entity.ByteArrayManager; import org.camunda.bpm.engine.impl.persistence.entity.CommentManager; import org.camunda.bpm.engine.impl.persistence.entity.DeploymentManager; import org.camunda.bpm.engine.impl.persistence.entity.EventSubscriptionManager; import org.camunda.bpm.engine.impl.persistence.entity.ExecutionManager; import org.camunda.bpm.engine.impl.persistence.entity.ExternalTaskManager; import org.camunda.bpm.engine.impl.persistence.entity.FilterManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricActivityInstanceManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricBatchManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricCaseActivityInstanceManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricCaseInstanceManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricDetailManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricExternalTaskLogManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricIdentityLinkLogManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricIncidentManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricJobLogManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricProcessInstanceManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricStatisticsManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricTaskInstanceManager; import org.camunda.bpm.engine.impl.persistence.entity.HistoricVariableInstanceManager; import org.camunda.bpm.engine.impl.persistence.entity.IdentityInfoManager; import org.camunda.bpm.engine.impl.persistence.entity.IdentityLinkManager; import org.camunda.bpm.engine.impl.persistence.entity.IncidentManager; import org.camunda.bpm.engine.impl.persistence.entity.JobDefinitionManager; import org.camunda.bpm.engine.impl.persistence.entity.JobManager; import org.camunda.bpm.engine.impl.persistence.entity.MeterLogManager; import org.camunda.bpm.engine.impl.persistence.entity.ProcessDefinitionManager; import org.camunda.bpm.engine.impl.persistence.entity.PropertyManager; import org.camunda.bpm.engine.impl.persistence.entity.ReportManager; import org.camunda.bpm.engine.impl.persistence.entity.ResourceManager; import org.camunda.bpm.engine.impl.persistence.entity.SchemaLogManager; import org.camunda.bpm.engine.impl.persistence.entity.StatisticsManager; import 
org.camunda.bpm.engine.impl.persistence.entity.TableDataManager; import org.camunda.bpm.engine.impl.persistence.entity.TaskManager; import org.camunda.bpm.engine.impl.persistence.entity.TaskReportManager; import org.camunda.bpm.engine.impl.persistence.entity.TenantManager; import org.camunda.bpm.engine.impl.persistence.entity.UserOperationLogManager; import org.camunda.bpm.engine.impl.persistence.entity.VariableInstanceManager; import org.camunda.bpm.engine.impl.repository.DefaultDeploymentHandlerFactory; import org.camunda.bpm.engine.impl.runtime.ConditionHandler; import org.camunda.bpm.engine.impl.runtime.CorrelationHandler; import org.camunda.bpm.engine.impl.runtime.DefaultConditionHandler; import org.camunda.bpm.engine.impl.runtime.DefaultCorrelationHandler; import org.camunda.bpm.engine.impl.runtime.DefaultDeserializationTypeValidator; import org.camunda.bpm.engine.impl.scripting.ScriptFactory; import org.camunda.bpm.engine.impl.scripting.engine.BeansResolverFactory; import org.camunda.bpm.engine.impl.scripting.engine.ResolverFactory; import org.camunda.bpm.engine.impl.scripting.engine.ScriptBindingsFactory; import org.camunda.bpm.engine.impl.scripting.engine.ScriptingEngines; import org.camunda.bpm.engine.impl.scripting.engine.VariableScopeResolverFactory; import org.camunda.bpm.engine.impl.scripting.env.ScriptEnvResolver; import org.camunda.bpm.engine.impl.scripting.env.ScriptingEnvironment; import org.camunda.bpm.engine.impl.util.IoUtil; import org.camunda.bpm.engine.impl.util.ParseUtil; import org.camunda.bpm.engine.impl.util.ReflectUtil; import org.camunda.bpm.engine.impl.variable.ValueTypeResolverImpl; import org.camunda.bpm.engine.impl.variable.serializer.BooleanValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.ByteArrayValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.DateValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.DefaultVariableSerializers; import org.camunda.bpm.engine.impl.variable.serializer.DoubleValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.FileValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.IntegerValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.JavaObjectSerializer; import org.camunda.bpm.engine.impl.variable.serializer.LongValueSerlializer; import org.camunda.bpm.engine.impl.variable.serializer.NullValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.ShortValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.StringValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.TypedValueSerializer; import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializerFactory; import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializers; import org.camunda.bpm.engine.impl.variable.serializer.jpa.EntityManagerSession; import org.camunda.bpm.engine.impl.variable.serializer.jpa.EntityManagerSessionFactory; import org.camunda.bpm.engine.impl.variable.serializer.jpa.JPAVariableSerializer; import org.camunda.bpm.engine.management.Metrics; import org.camunda.bpm.engine.repository.DeploymentBuilder; import org.camunda.bpm.engine.repository.DeploymentHandlerFactory; import org.camunda.bpm.engine.runtime.Incident; import org.camunda.bpm.engine.runtime.WhitelistingDeserializationTypeValidator; import org.camunda.bpm.engine.test.mock.MocksResolverFactory; import org.camunda.bpm.engine.variable.Variables; import org.camunda.bpm.engine.variable.type.ValueType; /** * @author Tom 
Baeyens */ public abstract class ProcessEngineConfigurationImpl extends ProcessEngineConfiguration { protected final static ConfigurationLogger LOG = ConfigurationLogger.CONFIG_LOGGER; public static final String DB_SCHEMA_UPDATE_CREATE = "create"; public static final String DB_SCHEMA_UPDATE_DROP_CREATE = "drop-create"; public static final int HISTORYLEVEL_NONE = HistoryLevel.HISTORY_LEVEL_NONE.getId(); public static final int HISTORYLEVEL_ACTIVITY = HistoryLevel.HISTORY_LEVEL_ACTIVITY.getId(); public static final int HISTORYLEVEL_AUDIT = HistoryLevel.HISTORY_LEVEL_AUDIT.getId(); public static final int HISTORYLEVEL_FULL = HistoryLevel.HISTORY_LEVEL_FULL.getId(); public static final String DEFAULT_WS_SYNC_FACTORY = "org.camunda.bpm.engine.impl.webservice.CxfWebServiceClientFactory"; public static final String DEFAULT_MYBATIS_MAPPING_FILE = "org/camunda/bpm/engine/impl/mapping/mappings.xml"; public static final int DEFAULT_FAILED_JOB_LISTENER_MAX_RETRIES = 3; public static final int DEFAULT_INVOCATIONS_PER_BATCH_JOB = 1; public static SqlSessionFactory cachedSqlSessionFactory; // SERVICES ///////////////////////////////////////////////////////////////// protected RepositoryService repositoryService = new RepositoryServiceImpl(); protected RuntimeService runtimeService = new RuntimeServiceImpl(); protected HistoryService historyService = new HistoryServiceImpl(); protected IdentityService identityService = new IdentityServiceImpl(); protected TaskService taskService = new TaskServiceImpl(); protected FormService formService = new FormServiceImpl(); protected ManagementService managementService = new ManagementServiceImpl(); protected AuthorizationService authorizationService = new AuthorizationServiceImpl(); protected CaseService caseService = new CaseServiceImpl(); protected FilterService filterService = new FilterServiceImpl(); protected ExternalTaskService externalTaskService = new ExternalTaskServiceImpl(); protected DecisionService decisionService = new DecisionServiceImpl(); protected OptimizeService optimizeService = new OptimizeService(); // COMMAND EXECUTORS //////////////////////////////////////////////////////// // Command executor and interceptor stack /** * the configurable list which will be {@link #initInterceptorChain(java.util.List) processed} to build the {@link #commandExecutorTxRequired} */ protected List<CommandInterceptor> customPreCommandInterceptorsTxRequired; protected List<CommandInterceptor> customPostCommandInterceptorsTxRequired; protected List<CommandInterceptor> commandInterceptorsTxRequired; /** * this will be initialized during the configurationComplete() */ protected CommandExecutor commandExecutorTxRequired; /** * the configurable list which will be {@link #initInterceptorChain(List) processed} to build the {@link #commandExecutorTxRequiresNew} */ protected List<CommandInterceptor> customPreCommandInterceptorsTxRequiresNew; protected List<CommandInterceptor> customPostCommandInterceptorsTxRequiresNew; protected List<CommandInterceptor> commandInterceptorsTxRequiresNew; /** * this will be initialized during the configurationComplete() */ protected CommandExecutor commandExecutorTxRequiresNew; /** * Separate command executor to be used for db schema operations. 
Must always use NON-JTA transactions */ protected CommandExecutor commandExecutorSchemaOperations; // SESSION FACTORIES //////////////////////////////////////////////////////// protected List<SessionFactory> customSessionFactories; protected DbSqlSessionFactory dbSqlSessionFactory; protected Map<Class<?>, SessionFactory> sessionFactories; // DEPLOYERS //////////////////////////////////////////////////////////////// protected List<Deployer> customPreDeployers; protected List<Deployer> customPostDeployers; protected List<Deployer> deployers; protected DeploymentCache deploymentCache; // CACHE //////////////////////////////////////////////////////////////////// protected CacheFactory cacheFactory; protected int cacheCapacity = 1000; protected boolean enableFetchProcessDefinitionDescription = true; // JOB EXECUTOR ///////////////////////////////////////////////////////////// protected List<JobHandler> customJobHandlers; protected Map<String, JobHandler> jobHandlers; protected JobExecutor jobExecutor; protected PriorityProvider<JobDeclaration<?, ?>> jobPriorityProvider; // EXTERNAL TASK ///////////////////////////////////////////////////////////// protected PriorityProvider<ExternalTaskActivityBehavior> externalTaskPriorityProvider; // MYBATIS SQL SESSION FACTORY ////////////////////////////////////////////// protected SqlSessionFactory sqlSessionFactory; protected TransactionFactory transactionFactory; // ID GENERATOR ///////////////////////////////////////////////////////////// protected IdGenerator idGenerator; protected DataSource idGeneratorDataSource; protected String idGeneratorDataSourceJndiName; // INCIDENT HANDLER ///////////////////////////////////////////////////////// protected Map<String, IncidentHandler> incidentHandlers; protected List<IncidentHandler> customIncidentHandlers; // BATCH //////////////////////////////////////////////////////////////////// protected Map<String, BatchJobHandler<?>> batchHandlers; protected List<BatchJobHandler<?>> customBatchJobHandlers; /** * Number of jobs created by a batch seed job invocation */ protected int batchJobsPerSeed = 100; /** * Number of invocations executed by a single batch job */ protected int invocationsPerBatchJob = DEFAULT_INVOCATIONS_PER_BATCH_JOB; /** * Map to set an individual value for each batch type to * control the invocations per batch job. Unless specified * in this map, value of 'invocationsPerBatchJob' is used. */ protected Map<String, Integer> invocationsPerBatchJobByBatchType; /** * seconds to wait between polling for batch completion */ protected int batchPollTime = 30; /** * default priority for batch jobs */ protected long batchJobPriority = DefaultJobPriorityProvider.DEFAULT_PRIORITY; // OTHER //////////////////////////////////////////////////////////////////// protected List<FormEngine> customFormEngines; protected Map<String, FormEngine> formEngines; protected List<AbstractFormFieldType> customFormTypes; protected FormTypes formTypes; protected FormValidators formValidators; protected Map<String, Class<? 
extends FormFieldValidator>> customFormFieldValidators; protected List<TypedValueSerializer> customPreVariableSerializers; protected List<TypedValueSerializer> customPostVariableSerializers; protected VariableSerializers variableSerializers; protected VariableSerializerFactory fallbackSerializerFactory; protected String defaultSerializationFormat = Variables.SerializationDataFormats.JAVA.getName(); protected boolean javaSerializationFormatEnabled = false; protected String defaultCharsetName = null; protected Charset defaultCharset = null; protected ExpressionManager expressionManager; protected ScriptingEngines scriptingEngines; protected List<ResolverFactory> resolverFactories; protected ScriptingEnvironment scriptingEnvironment; protected List<ScriptEnvResolver> scriptEnvResolvers; protected ScriptFactory scriptFactory; protected boolean autoStoreScriptVariables = false; protected boolean enableScriptCompilation = true; protected boolean enableScriptEngineCaching = true; protected boolean enableFetchScriptEngineFromProcessApplication = true; protected boolean cmmnEnabled = true; protected boolean dmnEnabled = true; protected boolean enableGracefulDegradationOnContextSwitchFailure = true; protected BusinessCalendarManager businessCalendarManager; protected String wsSyncFactoryClassName = DEFAULT_WS_SYNC_FACTORY; protected CommandContextFactory commandContextFactory; protected TransactionContextFactory transactionContextFactory; protected BpmnParseFactory bpmnParseFactory; // cmmn protected CmmnTransformFactory cmmnTransformFactory; protected DefaultCmmnElementHandlerRegistry cmmnElementHandlerRegistry; // dmn protected DefaultDmnEngineConfiguration dmnEngineConfiguration; protected DmnEngine dmnEngine; /** * a list of DMN FEEL custom function providers */ protected List<FeelCustomFunctionProvider> dmnFeelCustomFunctionProviders; /** * Enable DMN FEEL legacy behavior */ protected boolean dmnFeelEnableLegacyBehavior = false; protected HistoryLevel historyLevel; /** * a list of supported history levels */ protected List<HistoryLevel> historyLevels; /** * a list of supported custom history levels */ protected List<HistoryLevel> customHistoryLevels; protected List<BpmnParseListener> preParseListeners; protected List<BpmnParseListener> postParseListeners; protected List<CmmnTransformListener> customPreCmmnTransformListeners; protected List<CmmnTransformListener> customPostCmmnTransformListeners; protected Map<Object, Object> beans; protected boolean isDbIdentityUsed = true; protected boolean isDbHistoryUsed = true; protected DelegateInterceptor delegateInterceptor; protected CommandInterceptor actualCommandExecutor; protected RejectedJobsHandler customRejectedJobsHandler; protected Map<String, EventHandler> eventHandlers; protected List<EventHandler> customEventHandlers; protected FailedJobCommandFactory failedJobCommandFactory; protected String databaseTablePrefix = ""; /** * In some situations you want to set the schema to use for table checks / generation if the database metadata * doesn't return that correctly, see https://jira.codehaus.org/browse/ACT-1220, * https://jira.codehaus.org/browse/ACT-1062 */ protected String databaseSchema = null; protected boolean isCreateDiagramOnDeploy = false; protected ProcessApplicationManager processApplicationManager; protected CorrelationHandler correlationHandler; protected ConditionHandler conditionHandler; /** * session factory to be used for obtaining identity provider sessions */ protected SessionFactory identityProviderSessionFactory; protected 
PasswordEncryptor passwordEncryptor; protected List<PasswordEncryptor> customPasswordChecker; protected PasswordManager passwordManager; protected SaltGenerator saltGenerator; protected Set<String> registeredDeployments; protected DeploymentHandlerFactory deploymentHandlerFactory; protected ResourceAuthorizationProvider resourceAuthorizationProvider; protected List<ProcessEnginePlugin> processEnginePlugins = new ArrayList<>(); protected HistoryEventProducer historyEventProducer; protected CmmnHistoryEventProducer cmmnHistoryEventProducer; protected DmnHistoryEventProducer dmnHistoryEventProducer; /** * As an instance of {@link org.camunda.bpm.engine.impl.history.handler.CompositeHistoryEventHandler} * it contains all the provided history event handlers that process history events. */ protected HistoryEventHandler historyEventHandler; /** * Allows users to add additional {@link HistoryEventHandler} * instances to process history events. */ protected List<HistoryEventHandler> customHistoryEventHandlers = new ArrayList<>(); /** * If true, the default {@link DbHistoryEventHandler} will be included in the list * of history event handlers. */ protected boolean enableDefaultDbHistoryEventHandler = true; protected PermissionProvider permissionProvider; protected boolean isExecutionTreePrefetchEnabled = true; /** * If true the process engine will attempt to acquire an exclusive lock before * creating a deployment. */ protected boolean isDeploymentLockUsed = true; /** * If true then several deployments will be processed strictly sequentally. When false they may be processed in parallel. */ protected boolean isDeploymentSynchronized = true; /** * Allows setting whether the process engine should try reusing the first level entity cache. * Default setting is false, enabling it improves performance of asynchronous continuations. */ protected boolean isDbEntityCacheReuseEnabled = false; protected boolean isInvokeCustomVariableListeners = true; /** * The process engine created by this configuration. */ protected ProcessEngineImpl processEngine; /** * used to create instances for listeners, JavaDelegates, etc */ protected ArtifactFactory artifactFactory; protected DbEntityCacheKeyMapping dbEntityCacheKeyMapping = DbEntityCacheKeyMapping.defaultEntityCacheKeyMapping(); /** * the metrics registry */ protected MetricsRegistry metricsRegistry; protected DbMetricsReporter dbMetricsReporter; protected boolean isMetricsEnabled = true; protected boolean isDbMetricsReporterActivate = true; protected MetricsReporterIdProvider metricsReporterIdProvider; /** * the historic job log host name */ protected String hostname; protected HostnameProvider hostnameProvider; /** * handling of expressions submitted via API; can be used as guards against remote code execution */ protected boolean enableExpressionsInAdhocQueries = false; protected boolean enableExpressionsInStoredQueries = true; /** * If false, disables XML eXternal Entity (XXE) Processing. This provides protection against XXE Processing attacks. */ protected boolean enableXxeProcessing = false; /** * If true, user operation log entries are only written if there is an * authenticated user present in the context. If false, user operation log * entries are written regardless of authentication state. 
*/ protected boolean restrictUserOperationLogToAuthenticatedUsers = true; protected boolean disableStrictCallActivityValidation = false; protected boolean isBpmnStacktraceVerbose = false; protected boolean forceCloseMybatisConnectionPool = true; protected TenantIdProvider tenantIdProvider = null; protected List<CommandChecker> commandCheckers = null; protected List<String> adminGroups; protected List<String> adminUsers; // Migration protected MigrationActivityMatcher migrationActivityMatcher; protected List<MigrationActivityValidator> customPreMigrationActivityValidators; protected List<MigrationActivityValidator> customPostMigrationActivityValidators; protected MigrationInstructionGenerator migrationInstructionGenerator; protected List<MigrationInstructionValidator> customPreMigrationInstructionValidators; protected List<MigrationInstructionValidator> customPostMigrationInstructionValidators; protected List<MigrationInstructionValidator> migrationInstructionValidators; protected List<MigratingActivityInstanceValidator> customPreMigratingActivityInstanceValidators; protected List<MigratingActivityInstanceValidator> customPostMigratingActivityInstanceValidators; protected List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators; protected List<MigratingTransitionInstanceValidator> migratingTransitionInstanceValidators; protected List<MigratingCompensationInstanceValidator> migratingCompensationInstanceValidators; // Default user permission for task protected Permission defaultUserPermissionForTask; /** * Historic instance permissions are disabled by default */ protected boolean enableHistoricInstancePermissions = false; protected boolean isUseSharedSqlSessionFactory = false; //History cleanup configuration protected String historyCleanupBatchWindowStartTime; protected String historyCleanupBatchWindowEndTime = "00:00"; protected Date historyCleanupBatchWindowStartTimeAsDate; protected Date historyCleanupBatchWindowEndTimeAsDate; protected Map<Integer, BatchWindowConfiguration> historyCleanupBatchWindows = new HashMap<>(); //shortcuts for batch windows configuration available to be configured from XML protected String mondayHistoryCleanupBatchWindowStartTime; protected String mondayHistoryCleanupBatchWindowEndTime; protected String tuesdayHistoryCleanupBatchWindowStartTime; protected String tuesdayHistoryCleanupBatchWindowEndTime; protected String wednesdayHistoryCleanupBatchWindowStartTime; protected String wednesdayHistoryCleanupBatchWindowEndTime; protected String thursdayHistoryCleanupBatchWindowStartTime; protected String thursdayHistoryCleanupBatchWindowEndTime; protected String fridayHistoryCleanupBatchWindowStartTime; protected String fridayHistoryCleanupBatchWindowEndTime; protected String saturdayHistoryCleanupBatchWindowStartTime; protected String saturdayHistoryCleanupBatchWindowEndTime; protected String sundayHistoryCleanupBatchWindowStartTime; protected String sundayHistoryCleanupBatchWindowEndTime; protected int historyCleanupDegreeOfParallelism = 1; protected String historyTimeToLive; protected String batchOperationHistoryTimeToLive; protected Map<String, String> batchOperationsForHistoryCleanup; protected Map<String, Integer> parsedBatchOperationsForHistoryCleanup; protected BatchWindowManager batchWindowManager = new DefaultBatchWindowManager(); protected HistoryRemovalTimeProvider historyRemovalTimeProvider; protected String historyRemovalTimeStrategy; protected String historyCleanupStrategy; /** * Size of batch in which history cleanup data will be 
deleted. {@link HistoryCleanupHandler#MAX_BATCH_SIZE} must be respected.
   */
  private int historyCleanupBatchSize = 500;

  /**
   * Indicates the minimal amount of data to trigger the history cleanup.
   */
  private int historyCleanupBatchThreshold = 10;

  private boolean historyCleanupMetricsEnabled = true;

  /**
   * Controls whether the engine participates in history cleanup or not.
   */
  protected boolean historyCleanupEnabled = true;

  private int failedJobListenerMaxRetries = DEFAULT_FAILED_JOB_LISTENER_MAX_RETRIES;

  protected String failedJobRetryTimeCycle;

  // login attempts ///////////////////////////////////////////////////////
  protected int loginMaxAttempts = 10;
  protected int loginDelayFactor = 2;
  protected int loginDelayMaxTime = 60;
  protected int loginDelayBase = 3;

  // max results limit
  protected int queryMaxResultsLimit = Integer.MAX_VALUE;

  // logging context property names (with default values)
  protected String loggingContextActivityId = "activityId";
  protected String loggingContextApplicationName = "applicationName";
  protected String loggingContextBusinessKey; // default == null => disabled by default
  protected String loggingContextProcessDefinitionId = "processDefinitionId";
  protected String loggingContextProcessInstanceId = "processInstanceId";
  protected String loggingContextTenantId = "tenantId";

  // telemetry ///////////////////////////////////////////////////////
  protected boolean telemetryEnabled = false;
  /** The endpoint to which telemetry is sent */
  protected String telemetryEndpoint = "https://api.telemetry.camunda.cloud/pings";

  // buildProcessEngine ///////////////////////////////////////////////////////

  @Override
  public ProcessEngine buildProcessEngine() {
    init();
    processEngine = new ProcessEngineImpl(this);
    invokePostProcessEngineBuild(processEngine);
    return processEngine;
  }

  // init /////////////////////////////////////////////////////////////////////

  protected void init() {
    invokePreInit();
    initDefaultCharset();
    initHistoryLevel();
    initHistoryEventProducer();
    initCmmnHistoryEventProducer();
    initDmnHistoryEventProducer();
    initHistoryEventHandler();
    initExpressionManager();
    initBeans();
    initArtifactFactory();
    initFormEngines();
    initFormTypes();
    initFormFieldValidators();
    initScripting();
    initDmnEngine();
    initBusinessCalendarManager();
    initCommandContextFactory();
    initTransactionContextFactory();
    initCommandExecutors();
    initServices();
    initIdGenerator();
    initFailedJobCommandFactory();
    initDeployers();
    initJobProvider();
    initExternalTaskPriorityProvider();
    initBatchHandlers();
    initJobExecutor();
    initDataSource();
    initTransactionFactory();
    initSqlSessionFactory();
    initIdentityProviderSessionFactory();
    initSessionFactories();
    initValueTypeResolver();
    initTypeValidator();
    initSerialization();
    initJpa();
    initDelegateInterceptor();
    initEventHandlers();
    initProcessApplicationManager();
    initCorrelationHandler();
    initConditionHandler();
    initIncidentHandlers();
    initPasswordDigest();
    initDeploymentRegistration();
    initDeploymentHandlerFactory();
    initResourceAuthorizationProvider();
    initPermissionProvider();
    initHostName();
    initMetrics();
    initTelemetry();
    initMigration();
    initCommandCheckers();
    initDefaultUserPermissionForTask();
    initHistoryRemovalTime();
    initHistoryCleanup();
    initInvocationsPerBatchJobByBatchType();
    initAdminUser();
    initAdminGroups();
    initPasswordPolicy();
    invokePostInit();
  }
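  /*
   * Illustration (not part of this class): buildProcessEngine() is usually
   * reached through a concrete configuration subclass. A minimal sketch,
   * assuming the standalone in-memory subclass shipped with the engine and a
   * hypothetical H2 URL:
   *
   *   ProcessEngine engine = new StandaloneInMemProcessEngineConfiguration()
   *       .setJdbcUrl("jdbc:h2:mem:camunda;DB_CLOSE_DELAY=1000")
   *       .setJobExecutorActivate(false)
   *       .buildProcessEngine();
   *
   * Each init*() step above runs exactly once per configuration instance, in
   * the order listed, before the ProcessEngineImpl is constructed.
   */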
  protected void initTypeValidator() {
    if (deserializationTypeValidator == null) {
      deserializationTypeValidator = new DefaultDeserializationTypeValidator();
    }
    if (deserializationTypeValidator instanceof WhitelistingDeserializationTypeValidator) {
      WhitelistingDeserializationTypeValidator validator = (WhitelistingDeserializationTypeValidator) deserializationTypeValidator;
      validator.setAllowedClasses(deserializationAllowedClasses);
      validator.setAllowedPackages(deserializationAllowedPackages);
    }
  }

  public void initHistoryRemovalTime() {
    initHistoryRemovalTimeProvider();
    initHistoryRemovalTimeStrategy();
  }

  public void initHistoryRemovalTimeStrategy() {
    if (historyRemovalTimeStrategy == null) {
      historyRemovalTimeStrategy = HISTORY_REMOVAL_TIME_STRATEGY_END;
    }

    if (!HISTORY_REMOVAL_TIME_STRATEGY_START.equals(historyRemovalTimeStrategy)
        && !HISTORY_REMOVAL_TIME_STRATEGY_END.equals(historyRemovalTimeStrategy)
        && !HISTORY_REMOVAL_TIME_STRATEGY_NONE.equals(historyRemovalTimeStrategy)) {
      throw LOG.invalidPropertyValue("historyRemovalTimeStrategy", String.valueOf(historyRemovalTimeStrategy),
          String.format("history removal time strategy must be set to '%s', '%s' or '%s'",
              HISTORY_REMOVAL_TIME_STRATEGY_START, HISTORY_REMOVAL_TIME_STRATEGY_END, HISTORY_REMOVAL_TIME_STRATEGY_NONE));
    }
  }

  public void initHistoryRemovalTimeProvider() {
    if (historyRemovalTimeProvider == null) {
      historyRemovalTimeProvider = new DefaultHistoryRemovalTimeProvider();
    }
  }

  public void initHistoryCleanup() {
    initHistoryCleanupStrategy();

    // validate number of threads
    if (historyCleanupDegreeOfParallelism < 1 || historyCleanupDegreeOfParallelism > MAX_THREADS_NUMBER) {
      throw LOG.invalidPropertyValue("historyCleanupDegreeOfParallelism", String.valueOf(historyCleanupDegreeOfParallelism),
          String.format("value for number of threads for history cleanup should be between 1 and %s", HistoryCleanupCmd.MAX_THREADS_NUMBER));
    }

    if (historyCleanupBatchWindowStartTime != null) {
      initHistoryCleanupBatchWindowStartTime();
    }

    if (historyCleanupBatchWindowEndTime != null) {
      initHistoryCleanupBatchWindowEndTime();
    }

    initHistoryCleanupBatchWindowsMap();

    if (historyCleanupBatchSize > HistoryCleanupHandler.MAX_BATCH_SIZE || historyCleanupBatchSize <= 0) {
      throw LOG.invalidPropertyValue("historyCleanupBatchSize", String.valueOf(historyCleanupBatchSize),
          String.format("value for batch size should be between 1 and %s", HistoryCleanupHandler.MAX_BATCH_SIZE));
    }

    if (historyCleanupBatchThreshold < 0) {
      throw LOG.invalidPropertyValue("historyCleanupBatchThreshold", String.valueOf(historyCleanupBatchThreshold),
          "History cleanup batch threshold cannot be negative.");
    }

    initHistoryTimeToLive();

    initBatchOperationsHistoryTimeToLive();
  }

  protected void initHistoryCleanupStrategy() {
    if (historyCleanupStrategy == null) {
      historyCleanupStrategy = HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED;
    }

    if (!HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED.equals(historyCleanupStrategy)
        && !HISTORY_CLEANUP_STRATEGY_END_TIME_BASED.equals(historyCleanupStrategy)) {
      throw LOG.invalidPropertyValue("historyCleanupStrategy", String.valueOf(historyCleanupStrategy),
          String.format("history cleanup strategy must be either set to '%s' or '%s'",
              HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED, HISTORY_CLEANUP_STRATEGY_END_TIME_BASED));
    }

    if (HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED.equals(historyCleanupStrategy)
        && HISTORY_REMOVAL_TIME_STRATEGY_NONE.equals(historyRemovalTimeStrategy)) {
      throw LOG.invalidPropertyValue("historyRemovalTimeStrategy", String.valueOf(historyRemovalTimeStrategy),
          String.format("history removal time strategy cannot be set to '%s' in conjunction with '%s' history cleanup strategy",
              HISTORY_REMOVAL_TIME_STRATEGY_NONE, HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED));
    }
  }
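  /*
   * Illustration (not part of this class): how the cleanup window settings
   * validated above are typically supplied. A sketch assuming the usual
   * setter naming for the fields declared further up; "config" is a
   * hypothetical ProcessEngineConfigurationImpl instance:
   *
   *   // nightly window from 20:00 to 06:00 ...
   *   config.setHistoryCleanupBatchWindowStartTime("20:00");
   *   config.setHistoryCleanupBatchWindowEndTime("06:00");
   *   // ... widened on Saturdays via the per-weekday shortcut
   *   config.setSaturdayHistoryCleanupBatchWindowStartTime("12:00");
   *   config.setSaturdayHistoryCleanupBatchWindowEndTime("06:00");
   *   config.setHistoryCleanupDegreeOfParallelism(4);
   *
   * initHistoryCleanupBatchWindowsMap() below folds the per-weekday values
   * into the historyCleanupBatchWindows map, keyed by the java.util.Calendar
   * weekday constants.
   */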
  private void initHistoryCleanupBatchWindowsMap() {
    if (mondayHistoryCleanupBatchWindowStartTime != null || mondayHistoryCleanupBatchWindowEndTime != null) {
      historyCleanupBatchWindows.put(Calendar.MONDAY, new BatchWindowConfiguration(mondayHistoryCleanupBatchWindowStartTime, mondayHistoryCleanupBatchWindowEndTime));
    }

    if (tuesdayHistoryCleanupBatchWindowStartTime != null || tuesdayHistoryCleanupBatchWindowEndTime != null) {
      historyCleanupBatchWindows.put(Calendar.TUESDAY, new BatchWindowConfiguration(tuesdayHistoryCleanupBatchWindowStartTime, tuesdayHistoryCleanupBatchWindowEndTime));
    }

    if (wednesdayHistoryCleanupBatchWindowStartTime != null || wednesdayHistoryCleanupBatchWindowEndTime != null) {
      historyCleanupBatchWindows.put(Calendar.WEDNESDAY, new BatchWindowConfiguration(wednesdayHistoryCleanupBatchWindowStartTime, wednesdayHistoryCleanupBatchWindowEndTime));
    }

    if (thursdayHistoryCleanupBatchWindowStartTime != null || thursdayHistoryCleanupBatchWindowEndTime != null) {
      historyCleanupBatchWindows.put(Calendar.THURSDAY, new BatchWindowConfiguration(thursdayHistoryCleanupBatchWindowStartTime, thursdayHistoryCleanupBatchWindowEndTime));
    }

    if (fridayHistoryCleanupBatchWindowStartTime != null || fridayHistoryCleanupBatchWindowEndTime != null) {
      historyCleanupBatchWindows.put(Calendar.FRIDAY, new BatchWindowConfiguration(fridayHistoryCleanupBatchWindowStartTime, fridayHistoryCleanupBatchWindowEndTime));
    }

    if (saturdayHistoryCleanupBatchWindowStartTime != null || saturdayHistoryCleanupBatchWindowEndTime != null) {
      historyCleanupBatchWindows.put(Calendar.SATURDAY, new BatchWindowConfiguration(saturdayHistoryCleanupBatchWindowStartTime, saturdayHistoryCleanupBatchWindowEndTime));
    }

    if (sundayHistoryCleanupBatchWindowStartTime != null || sundayHistoryCleanupBatchWindowEndTime != null) {
      historyCleanupBatchWindows.put(Calendar.SUNDAY, new BatchWindowConfiguration(sundayHistoryCleanupBatchWindowStartTime, sundayHistoryCleanupBatchWindowEndTime));
    }
  }

  protected void initInvocationsPerBatchJobByBatchType() {
    if (invocationsPerBatchJobByBatchType == null) {
      invocationsPerBatchJobByBatchType = new HashMap<>();

    } else {
      Set<String> batchTypes = invocationsPerBatchJobByBatchType.keySet();
      batchTypes.stream()
          // batchHandlers contains custom & built-in batch handlers
          .filter(batchType -> !batchHandlers.containsKey(batchType))
          .forEach(LOG::invalidBatchTypeForInvocationsPerBatchJob);
    }
  }

  protected void initHistoryTimeToLive() {
    try {
      ParseUtil.parseHistoryTimeToLive(historyTimeToLive);
    } catch (Exception e) {
      throw LOG.invalidPropertyValue("historyTimeToLive", historyTimeToLive, e);
    }
  }

  protected void initBatchOperationsHistoryTimeToLive() {
    try {
      ParseUtil.parseHistoryTimeToLive(batchOperationHistoryTimeToLive);
    } catch (Exception e) {
      throw LOG.invalidPropertyValue("batchOperationHistoryTimeToLive", batchOperationHistoryTimeToLive, e);
    }

    if (batchOperationsForHistoryCleanup == null) {
      batchOperationsForHistoryCleanup = new HashMap<>();
    } else {
      for (String batchOperation : batchOperationsForHistoryCleanup.keySet()) {
        String timeToLive = batchOperationsForHistoryCleanup.get(batchOperation);
        if (!batchHandlers.containsKey(batchOperation)) {
          LOG.invalidBatchOperation(batchOperation, timeToLive);
        }

        try {
          ParseUtil.parseHistoryTimeToLive(timeToLive);
        } catch (Exception e) {
          throw LOG.invalidPropertyValue("history time to live for " + batchOperation + " batch operations", timeToLive, e);
        }
      }
    }

    if (batchHandlers != null && batchOperationHistoryTimeToLive != null) {
      for (String batchOperation :
batchHandlers.keySet()) { if (!batchOperationsForHistoryCleanup.containsKey(batchOperation)) { batchOperationsForHistoryCleanup.put(batchOperation, batchOperationHistoryTimeToLive); } } } parsedBatchOperationsForHistoryCleanup = new HashMap<>(); if (batchOperationsForHistoryCleanup != null) { for (String operation : batchOperationsForHistoryCleanup.keySet()) { Integer historyTimeToLive = ParseUtil.parseHistoryTimeToLive(batchOperationsForHistoryCleanup.get(operation)); parsedBatchOperationsForHistoryCleanup.put(operation, historyTimeToLive); } } } private void initHistoryCleanupBatchWindowEndTime() { try { historyCleanupBatchWindowEndTimeAsDate = HistoryCleanupHelper.parseTimeConfiguration(historyCleanupBatchWindowEndTime); } catch (ParseException e) { throw LOG.invalidPropertyValue("historyCleanupBatchWindowEndTime", historyCleanupBatchWindowEndTime); } } private void initHistoryCleanupBatchWindowStartTime() { try { historyCleanupBatchWindowStartTimeAsDate = HistoryCleanupHelper.parseTimeConfiguration(historyCleanupBatchWindowStartTime); } catch (ParseException e) { throw LOG.invalidPropertyValue("historyCleanupBatchWindowStartTime", historyCleanupBatchWindowStartTime); } } protected void invokePreInit() { for (ProcessEnginePlugin plugin : processEnginePlugins) { LOG.pluginActivated(plugin.toString(), getProcessEngineName()); plugin.preInit(this); } } protected void invokePostInit() { for (ProcessEnginePlugin plugin : processEnginePlugins) { plugin.postInit(this); } } protected void invokePostProcessEngineBuild(ProcessEngine engine) { for (ProcessEnginePlugin plugin : processEnginePlugins) { plugin.postProcessEngineBuild(engine); } } // failedJobCommandFactory //////////////////////////////////////////////////////// protected void initFailedJobCommandFactory() { if (failedJobCommandFactory == null) { failedJobCommandFactory = new DefaultFailedJobCommandFactory(); } if (postParseListeners == null) { postParseListeners = new ArrayList<>(); } postParseListeners.add(new DefaultFailedJobParseListener()); } // incident handlers ///////////////////////////////////////////////////////////// protected void initIncidentHandlers() { if (incidentHandlers == null) { incidentHandlers = new HashMap<>(); DefaultIncidentHandler failedJobIncidentHandler = new DefaultIncidentHandler(Incident.FAILED_JOB_HANDLER_TYPE); incidentHandlers.put(failedJobIncidentHandler.getIncidentHandlerType(), failedJobIncidentHandler); DefaultIncidentHandler failedExternalTaskIncidentHandler = new DefaultIncidentHandler(Incident.EXTERNAL_TASK_HANDLER_TYPE); incidentHandlers.put(failedExternalTaskIncidentHandler.getIncidentHandlerType(), failedExternalTaskIncidentHandler); } if (customIncidentHandlers != null) { for (IncidentHandler incidentHandler : customIncidentHandlers) { incidentHandlers.put(incidentHandler.getIncidentHandlerType(), incidentHandler); } } } // batch /////////////////////////////////////////////////////////////////////// protected void initBatchHandlers() { if (batchHandlers == null) { batchHandlers = new HashMap<>(); MigrationBatchJobHandler migrationHandler = new MigrationBatchJobHandler(); batchHandlers.put(migrationHandler.getType(), migrationHandler); ModificationBatchJobHandler modificationHandler = new ModificationBatchJobHandler(); batchHandlers.put(modificationHandler.getType(), modificationHandler); DeleteProcessInstancesJobHandler deleteProcessJobHandler = new DeleteProcessInstancesJobHandler(); batchHandlers.put(deleteProcessJobHandler.getType(), deleteProcessJobHandler); 
DeleteHistoricProcessInstancesJobHandler deleteHistoricProcessInstancesJobHandler = new DeleteHistoricProcessInstancesJobHandler(); batchHandlers.put(deleteHistoricProcessInstancesJobHandler.getType(), deleteHistoricProcessInstancesJobHandler); SetJobRetriesJobHandler setJobRetriesJobHandler = new SetJobRetriesJobHandler(); batchHandlers.put(setJobRetriesJobHandler.getType(), setJobRetriesJobHandler); SetExternalTaskRetriesJobHandler setExternalTaskRetriesJobHandler = new SetExternalTaskRetriesJobHandler(); batchHandlers.put(setExternalTaskRetriesJobHandler.getType(), setExternalTaskRetriesJobHandler); RestartProcessInstancesJobHandler restartProcessInstancesJobHandler = new RestartProcessInstancesJobHandler(); batchHandlers.put(restartProcessInstancesJobHandler.getType(), restartProcessInstancesJobHandler); UpdateProcessInstancesSuspendStateJobHandler suspendProcessInstancesJobHandler = new UpdateProcessInstancesSuspendStateJobHandler(); batchHandlers.put(suspendProcessInstancesJobHandler.getType(), suspendProcessInstancesJobHandler); DeleteHistoricDecisionInstancesJobHandler deleteHistoricDecisionInstancesJobHandler = new DeleteHistoricDecisionInstancesJobHandler(); batchHandlers.put(deleteHistoricDecisionInstancesJobHandler.getType(), deleteHistoricDecisionInstancesJobHandler); ProcessSetRemovalTimeJobHandler processSetRemovalTimeJobHandler = new ProcessSetRemovalTimeJobHandler(); batchHandlers.put(processSetRemovalTimeJobHandler.getType(), processSetRemovalTimeJobHandler); DecisionSetRemovalTimeJobHandler decisionSetRemovalTimeJobHandler = new DecisionSetRemovalTimeJobHandler(); batchHandlers.put(decisionSetRemovalTimeJobHandler.getType(), decisionSetRemovalTimeJobHandler); BatchSetRemovalTimeJobHandler batchSetRemovalTimeJobHandler = new BatchSetRemovalTimeJobHandler(); batchHandlers.put(batchSetRemovalTimeJobHandler.getType(), batchSetRemovalTimeJobHandler); } if (customBatchJobHandlers != null) { for (BatchJobHandler<?> customBatchJobHandler : customBatchJobHandlers) { batchHandlers.put(customBatchJobHandler.getType(), customBatchJobHandler); } } } // command executors //////////////////////////////////////////////////////// protected abstract Collection<? extends CommandInterceptor> getDefaultCommandInterceptorsTxRequired(); protected abstract Collection<? 
extends CommandInterceptor> getDefaultCommandInterceptorsTxRequiresNew(); protected void initCommandExecutors() { initActualCommandExecutor(); initCommandInterceptorsTxRequired(); initCommandExecutorTxRequired(); initCommandInterceptorsTxRequiresNew(); initCommandExecutorTxRequiresNew(); initCommandExecutorDbSchemaOperations(); } protected void initActualCommandExecutor() { actualCommandExecutor = new CommandExecutorImpl(); } protected void initCommandInterceptorsTxRequired() { if (commandInterceptorsTxRequired == null) { if (customPreCommandInterceptorsTxRequired != null) { commandInterceptorsTxRequired = new ArrayList<>(customPreCommandInterceptorsTxRequired); } else { commandInterceptorsTxRequired = new ArrayList<>(); } commandInterceptorsTxRequired.addAll(getDefaultCommandInterceptorsTxRequired()); if (customPostCommandInterceptorsTxRequired != null) { commandInterceptorsTxRequired.addAll(customPostCommandInterceptorsTxRequired); } commandInterceptorsTxRequired.add(actualCommandExecutor); } } protected void initCommandInterceptorsTxRequiresNew() { if (commandInterceptorsTxRequiresNew == null) { if (customPreCommandInterceptorsTxRequiresNew != null) { commandInterceptorsTxRequiresNew = new ArrayList<>(customPreCommandInterceptorsTxRequiresNew); } else { commandInterceptorsTxRequiresNew = new ArrayList<>(); } commandInterceptorsTxRequiresNew.addAll(getDefaultCommandInterceptorsTxRequiresNew()); if (customPostCommandInterceptorsTxRequiresNew != null) { commandInterceptorsTxRequiresNew.addAll(customPostCommandInterceptorsTxRequiresNew); } commandInterceptorsTxRequiresNew.add(actualCommandExecutor); } } protected void initCommandExecutorTxRequired() { if (commandExecutorTxRequired == null) { commandExecutorTxRequired = initInterceptorChain(commandInterceptorsTxRequired); } } protected void initCommandExecutorTxRequiresNew() { if (commandExecutorTxRequiresNew == null) { commandExecutorTxRequiresNew = initInterceptorChain(commandInterceptorsTxRequiresNew); } } protected void initCommandExecutorDbSchemaOperations() { if (commandExecutorSchemaOperations == null) { // in default case, we use the same command executor for DB Schema Operations as for runtime operations. // configurations that Use JTA Transactions should override this method and provide a custom command executor // that uses NON-JTA Transactions. 
commandExecutorSchemaOperations = commandExecutorTxRequired;
    }
  }

  protected CommandInterceptor initInterceptorChain(List<CommandInterceptor> chain) {
    if (chain == null || chain.isEmpty()) {
      throw new ProcessEngineException("invalid command interceptor chain configuration: " + chain);
    }
    for (int i = 0; i < chain.size() - 1; i++) {
      chain.get(i).setNext(chain.get(i + 1));
    }
    return chain.get(0);
  }

  // services /////////////////////////////////////////////////////////////////

  protected void initServices() {
    initService(repositoryService);
    initService(runtimeService);
    initService(historyService);
    initService(identityService);
    initService(taskService);
    initService(formService);
    initService(managementService);
    initService(authorizationService);
    initService(caseService);
    initService(filterService);
    initService(externalTaskService);
    initService(decisionService);
    initService(optimizeService);
  }

  protected void initService(Object service) {
    if (service instanceof ServiceImpl) {
      ((ServiceImpl) service).setCommandExecutor(commandExecutorTxRequired);
    }
    if (service instanceof RepositoryServiceImpl) {
      ((RepositoryServiceImpl) service).setDeploymentCharset(getDefaultCharset());
    }
  }

  // DataSource ///////////////////////////////////////////////////////////////

  protected void initDataSource() {
    if (dataSource == null) {
      if (dataSourceJndiName != null) {
        try {
          dataSource = (DataSource) new InitialContext().lookup(dataSourceJndiName);
        } catch (Exception e) {
          throw new ProcessEngineException("couldn't look up datasource from " + dataSourceJndiName + ": " + e.getMessage(), e);
        }

      } else if (jdbcUrl != null) {
        if ((jdbcDriver == null) || (jdbcUsername == null)) { // jdbcUrl is known to be non-null in this branch
          throw new ProcessEngineException("DataSource or JDBC properties have to be specified in a process engine configuration");
        }

        PooledDataSource pooledDataSource = new PooledDataSource(ReflectUtil.getClassLoader(), jdbcDriver, jdbcUrl, jdbcUsername, jdbcPassword);

        if (jdbcMaxActiveConnections > 0) {
          pooledDataSource.setPoolMaximumActiveConnections(jdbcMaxActiveConnections);
        }
        if (jdbcMaxIdleConnections > 0) {
          pooledDataSource.setPoolMaximumIdleConnections(jdbcMaxIdleConnections);
        }
        if (jdbcMaxCheckoutTime > 0) {
          pooledDataSource.setPoolMaximumCheckoutTime(jdbcMaxCheckoutTime);
        }
        if (jdbcMaxWaitTime > 0) {
          pooledDataSource.setPoolTimeToWait(jdbcMaxWaitTime);
        }
        if (jdbcPingEnabled) {
          pooledDataSource.setPoolPingEnabled(true);
          if (jdbcPingQuery != null) {
            pooledDataSource.setPoolPingQuery(jdbcPingQuery);
          }
          pooledDataSource.setPoolPingConnectionsNotUsedFor(jdbcPingConnectionNotUsedFor);
        }
        dataSource = pooledDataSource;
      }

      if (dataSource instanceof PooledDataSource) {
        // ACT-233: the iBatis connection pool is not properly initialized if this is not called!
        ((PooledDataSource) dataSource).forceCloseAll();
      }
    }

    if (databaseType == null) {
      initDatabaseType();
    }
  }
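  /*
   * Illustration (not part of this class): the two configuration styles
   * initDataSource() accepts. A sketch; the JNDI name and JDBC values are
   * hypothetical:
   *
   *   // container-managed pool, looked up via JNDI
   *   config.setDataSourceJndiName("java:jdbc/ProcessEngineDS");
   *
   *   // or an engine-managed MyBatis PooledDataSource
   *   config.setJdbcDriver("org.postgresql.Driver");
   *   config.setJdbcUrl("jdbc:postgresql://localhost:5432/engine");
   *   config.setJdbcUsername("camunda");
   *   config.setJdbcPassword("secret");
   *   config.setJdbcMaxActiveConnections(10);
   *
   * If neither is set, dataSource stays null here and must be injected
   * directly via setDataSource(...).
   */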
  protected static Properties databaseTypeMappings = getDefaultDatabaseTypeMappings();

  protected static final String MY_SQL_PRODUCT_NAME = "MySQL";
  protected static final String MARIA_DB_PRODUCT_NAME = "MariaDB";

  protected static Properties getDefaultDatabaseTypeMappings() {
    Properties databaseTypeMappings = new Properties();
    databaseTypeMappings.setProperty("H2", "h2");
    databaseTypeMappings.setProperty(MY_SQL_PRODUCT_NAME, "mysql");
    databaseTypeMappings.setProperty(MARIA_DB_PRODUCT_NAME, "mariadb");
    databaseTypeMappings.setProperty("Oracle", "oracle");
    databaseTypeMappings.setProperty("PostgreSQL", "postgres");
    databaseTypeMappings.setProperty("Microsoft SQL Server", "mssql");
    databaseTypeMappings.setProperty("DB2", "db2");
    databaseTypeMappings.setProperty("DB2/NT", "db2");
    databaseTypeMappings.setProperty("DB2/NT64", "db2");
    databaseTypeMappings.setProperty("DB2 UDP", "db2");
    databaseTypeMappings.setProperty("DB2/LINUX", "db2");
    databaseTypeMappings.setProperty("DB2/LINUX390", "db2");
    databaseTypeMappings.setProperty("DB2/LINUXX8664", "db2");
    databaseTypeMappings.setProperty("DB2/LINUXZ64", "db2");
    databaseTypeMappings.setProperty("DB2/400 SQL", "db2");
    databaseTypeMappings.setProperty("DB2/6000", "db2");
    databaseTypeMappings.setProperty("DB2 UDB iSeries", "db2");
    databaseTypeMappings.setProperty("DB2/AIX64", "db2");
    databaseTypeMappings.setProperty("DB2/HPUX", "db2");
    databaseTypeMappings.setProperty("DB2/HP64", "db2");
    databaseTypeMappings.setProperty("DB2/SUN", "db2");
    databaseTypeMappings.setProperty("DB2/SUN64", "db2");
    databaseTypeMappings.setProperty("DB2/PTX", "db2");
    databaseTypeMappings.setProperty("DB2/2", "db2");
    return databaseTypeMappings;
  }

  public void initDatabaseType() {
    Connection connection = null;
    try {
      connection = dataSource.getConnection();
      DatabaseMetaData databaseMetaData = connection.getMetaData();
      String databaseProductName = databaseMetaData.getDatabaseProductName();
      if (MY_SQL_PRODUCT_NAME.equals(databaseProductName)) {
        databaseProductName = checkForMariaDb(databaseMetaData, databaseProductName);
      }
      LOG.debugDatabaseproductName(databaseProductName);
      databaseType = databaseTypeMappings.getProperty(databaseProductName);
      ensureNotNull("couldn't deduce database type from database product name '" + databaseProductName + "'", "databaseType", databaseType);
      LOG.debugDatabaseType(databaseType);

    } catch (SQLException e) {
      LOG.databaseConnectionAccessException(e);
    } finally {
      try {
        if (connection != null) {
          connection.close();
        }
      } catch (SQLException e) {
        LOG.databaseConnectionCloseException(e);
      }
    }
  }

  /**
   * MariaDB still reports 'MySQL' as its database product name. This method
   * looks for evidence that the database is actually MariaDB; if it finds
   * any, it returns "MariaDB", otherwise the provided database name.
*/ protected String checkForMariaDb(DatabaseMetaData databaseMetaData, String databaseName) { try { String databaseProductVersion = databaseMetaData.getDatabaseProductVersion(); if (databaseProductVersion != null && databaseProductVersion.toLowerCase().contains("mariadb")) { return MARIA_DB_PRODUCT_NAME; } } catch (SQLException ignore) { } try { String driverName = databaseMetaData.getDriverName(); if (driverName != null && driverName.toLowerCase().contains("mariadb")) { return MARIA_DB_PRODUCT_NAME; } } catch (SQLException ignore) { } String metaDataClassName = databaseMetaData.getClass().getName(); if (metaDataClassName != null && metaDataClassName.toLowerCase().contains("mariadb")) { return MARIA_DB_PRODUCT_NAME; } return databaseName; } // myBatis SqlSessionFactory //////////////////////////////////////////////// protected void initTransactionFactory() { if (transactionFactory == null) { if (transactionsExternallyManaged) { transactionFactory = new ManagedTransactionFactory(); } else { transactionFactory = new JdbcTransactionFactory(); } } } protected void initSqlSessionFactory() { // to protect access to cachedSqlSessionFactory see CAM-6682 synchronized (ProcessEngineConfigurationImpl.class) { if (isUseSharedSqlSessionFactory) { sqlSessionFactory = cachedSqlSessionFactory; } if (sqlSessionFactory == null) { InputStream inputStream = null; try { inputStream = getMyBatisXmlConfigurationSteam(); // update the jdbc parameters to the configured ones... Environment environment = new Environment("default", transactionFactory, dataSource); Reader reader = new InputStreamReader(inputStream); Properties properties = new Properties(); if (isUseSharedSqlSessionFactory) { properties.put("prefix", "${@org.camunda.bpm.engine.impl.context.Context@getProcessEngineConfiguration().databaseTablePrefix}"); } else { properties.put("prefix", databaseTablePrefix); } initSqlSessionFactoryProperties(properties, databaseTablePrefix, databaseType); XMLConfigBuilder parser = new XMLConfigBuilder(reader, "", properties); Configuration configuration = parser.getConfiguration(); configuration.setEnvironment(environment); configuration = parser.parse(); configuration.setDefaultStatementTimeout(jdbcStatementTimeout); if (isJdbcBatchProcessing()) { configuration.setDefaultExecutorType(ExecutorType.BATCH); } sqlSessionFactory = new DefaultSqlSessionFactory(configuration); if (isUseSharedSqlSessionFactory) { cachedSqlSessionFactory = sqlSessionFactory; } } catch (Exception e) { throw new ProcessEngineException("Error while building ibatis SqlSessionFactory: " + e.getMessage(), e); } finally { IoUtil.closeSilently(inputStream); } } } } public static void initSqlSessionFactoryProperties(Properties properties, String databaseTablePrefix, String databaseType) { if (databaseType != null) { properties.put("limitBefore", DbSqlSessionFactory.databaseSpecificLimitBeforeStatements.get(databaseType)); properties.put("limitAfter", DbSqlSessionFactory.databaseSpecificLimitAfterStatements.get(databaseType)); properties.put("limitBeforeWithoutOffset", DbSqlSessionFactory.databaseSpecificLimitBeforeWithoutOffsetStatements.get(databaseType)); properties.put("limitAfterWithoutOffset", DbSqlSessionFactory.databaseSpecificLimitAfterWithoutOffsetStatements.get(databaseType)); properties.put("optimizeLimitBeforeWithoutOffset", DbSqlSessionFactory.optimizeDatabaseSpecificLimitBeforeWithoutOffsetStatements.get(databaseType)); properties.put("optimizeLimitAfterWithoutOffset", 
DbSqlSessionFactory.optimizeDatabaseSpecificLimitAfterWithoutOffsetStatements.get(databaseType)); properties.put("innerLimitAfter", DbSqlSessionFactory.databaseSpecificInnerLimitAfterStatements.get(databaseType)); properties.put("limitBetween", DbSqlSessionFactory.databaseSpecificLimitBetweenStatements.get(databaseType)); properties.put("limitBetweenFilter", DbSqlSessionFactory.databaseSpecificLimitBetweenFilterStatements.get(databaseType)); properties.put("limitBetweenAcquisition", DbSqlSessionFactory.databaseSpecificLimitBetweenAcquisitionStatements.get(databaseType)); properties.put("orderBy", DbSqlSessionFactory.databaseSpecificOrderByStatements.get(databaseType)); properties.put("limitBeforeNativeQuery", DbSqlSessionFactory.databaseSpecificLimitBeforeNativeQueryStatements.get(databaseType)); properties.put("distinct", DbSqlSessionFactory.databaseSpecificDistinct.get(databaseType)); properties.put("countDistinctBeforeStart", DbSqlSessionFactory.databaseSpecificCountDistinctBeforeStart.get(databaseType)); properties.put("countDistinctBeforeEnd", DbSqlSessionFactory.databaseSpecificCountDistinctBeforeEnd.get(databaseType)); properties.put("countDistinctAfterEnd", DbSqlSessionFactory.databaseSpecificCountDistinctAfterEnd.get(databaseType)); properties.put("escapeChar", DbSqlSessionFactory.databaseSpecificEscapeChar.get(databaseType)); properties.put("bitand1", DbSqlSessionFactory.databaseSpecificBitAnd1.get(databaseType)); properties.put("bitand2", DbSqlSessionFactory.databaseSpecificBitAnd2.get(databaseType)); properties.put("bitand3", DbSqlSessionFactory.databaseSpecificBitAnd3.get(databaseType)); properties.put("datepart1", DbSqlSessionFactory.databaseSpecificDatepart1.get(databaseType)); properties.put("datepart2", DbSqlSessionFactory.databaseSpecificDatepart2.get(databaseType)); properties.put("datepart3", DbSqlSessionFactory.databaseSpecificDatepart3.get(databaseType)); properties.put("trueConstant", DbSqlSessionFactory.databaseSpecificTrueConstant.get(databaseType)); properties.put("falseConstant", DbSqlSessionFactory.databaseSpecificFalseConstant.get(databaseType)); properties.put("dbSpecificDummyTable", DbSqlSessionFactory.databaseSpecificDummyTable.get(databaseType)); properties.put("dbSpecificIfNullFunction", DbSqlSessionFactory.databaseSpecificIfNull.get(databaseType)); properties.put("dayComparator", DbSqlSessionFactory.databaseSpecificDaysComparator.get(databaseType)); properties.put("collationForCaseSensitivity", DbSqlSessionFactory.databaseSpecificCollationForCaseSensitivity.get(databaseType)); Map<String, String> constants = DbSqlSessionFactory.dbSpecificConstants.get(databaseType); for (Entry<String, String> entry : constants.entrySet()) { properties.put(entry.getKey(), entry.getValue()); } } } protected InputStream getMyBatisXmlConfigurationSteam() { return ReflectUtil.getResourceAsStream(DEFAULT_MYBATIS_MAPPING_FILE); } // session factories //////////////////////////////////////////////////////// protected void initIdentityProviderSessionFactory() { if (identityProviderSessionFactory == null) { identityProviderSessionFactory = new GenericManagerFactory(DbIdentityServiceProvider.class); } } protected void initSessionFactories() { if (sessionFactories == null) { sessionFactories = new HashMap<>(); initPersistenceProviders(); addSessionFactory(new DbEntityManagerFactory(idGenerator)); addSessionFactory(new GenericManagerFactory(AttachmentManager.class)); addSessionFactory(new GenericManagerFactory(CommentManager.class)); addSessionFactory(new 
GenericManagerFactory(DeploymentManager.class)); addSessionFactory(new GenericManagerFactory(ExecutionManager.class)); addSessionFactory(new GenericManagerFactory(HistoricActivityInstanceManager.class)); addSessionFactory(new GenericManagerFactory(HistoricCaseActivityInstanceManager.class)); addSessionFactory(new GenericManagerFactory(HistoricStatisticsManager.class)); addSessionFactory(new GenericManagerFactory(HistoricDetailManager.class)); addSessionFactory(new GenericManagerFactory(HistoricProcessInstanceManager.class)); addSessionFactory(new GenericManagerFactory(HistoricCaseInstanceManager.class)); addSessionFactory(new GenericManagerFactory(UserOperationLogManager.class)); addSessionFactory(new GenericManagerFactory(HistoricTaskInstanceManager.class)); addSessionFactory(new GenericManagerFactory(HistoricVariableInstanceManager.class)); addSessionFactory(new GenericManagerFactory(HistoricIncidentManager.class)); addSessionFactory(new GenericManagerFactory(HistoricIdentityLinkLogManager.class)); addSessionFactory(new GenericManagerFactory(HistoricJobLogManager.class)); addSessionFactory(new GenericManagerFactory(HistoricExternalTaskLogManager.class)); addSessionFactory(new GenericManagerFactory(IdentityInfoManager.class)); addSessionFactory(new GenericManagerFactory(IdentityLinkManager.class)); addSessionFactory(new GenericManagerFactory(JobManager.class)); addSessionFactory(new GenericManagerFactory(JobDefinitionManager.class)); addSessionFactory(new GenericManagerFactory(ProcessDefinitionManager.class)); addSessionFactory(new GenericManagerFactory(PropertyManager.class)); addSessionFactory(new GenericManagerFactory(ResourceManager.class)); addSessionFactory(new GenericManagerFactory(ByteArrayManager.class)); addSessionFactory(new GenericManagerFactory(TableDataManager.class)); addSessionFactory(new GenericManagerFactory(TaskManager.class)); addSessionFactory(new GenericManagerFactory(TaskReportManager.class)); addSessionFactory(new GenericManagerFactory(VariableInstanceManager.class)); addSessionFactory(new GenericManagerFactory(EventSubscriptionManager.class)); addSessionFactory(new GenericManagerFactory(StatisticsManager.class)); addSessionFactory(new GenericManagerFactory(IncidentManager.class)); addSessionFactory(new GenericManagerFactory(AuthorizationManager.class)); addSessionFactory(new GenericManagerFactory(FilterManager.class)); addSessionFactory(new GenericManagerFactory(MeterLogManager.class)); addSessionFactory(new GenericManagerFactory(ExternalTaskManager.class)); addSessionFactory(new GenericManagerFactory(ReportManager.class)); addSessionFactory(new GenericManagerFactory(BatchManager.class)); addSessionFactory(new GenericManagerFactory(HistoricBatchManager.class)); addSessionFactory(new GenericManagerFactory(TenantManager.class)); addSessionFactory(new GenericManagerFactory(SchemaLogManager.class)); addSessionFactory(new GenericManagerFactory(CaseDefinitionManager.class)); addSessionFactory(new GenericManagerFactory(CaseExecutionManager.class)); addSessionFactory(new GenericManagerFactory(CaseSentryPartManager.class)); addSessionFactory(new GenericManagerFactory(DecisionDefinitionManager.class)); addSessionFactory(new GenericManagerFactory(DecisionRequirementsDefinitionManager.class)); addSessionFactory(new GenericManagerFactory(HistoricDecisionInstanceManager.class)); addSessionFactory(new GenericManagerFactory(OptimizeManager.class)); sessionFactories.put(ReadOnlyIdentityProvider.class, identityProviderSessionFactory); // check whether identityProviderSessionFactory 
implements WritableIdentityProvider Class<?> identityProviderType = identityProviderSessionFactory.getSessionType(); if (WritableIdentityProvider.class.isAssignableFrom(identityProviderType)) { sessionFactories.put(WritableIdentityProvider.class, identityProviderSessionFactory); } } if (customSessionFactories != null) { for (SessionFactory sessionFactory : customSessionFactories) { addSessionFactory(sessionFactory); } } } protected void initPersistenceProviders() { ensurePrefixAndSchemaFitToegether(databaseTablePrefix, databaseSchema); dbSqlSessionFactory = new DbSqlSessionFactory(jdbcBatchProcessing); dbSqlSessionFactory.setDatabaseType(databaseType); dbSqlSessionFactory.setIdGenerator(idGenerator); dbSqlSessionFactory.setSqlSessionFactory(sqlSessionFactory); dbSqlSessionFactory.setDbIdentityUsed(isDbIdentityUsed); dbSqlSessionFactory.setDbHistoryUsed(isDbHistoryUsed); dbSqlSessionFactory.setCmmnEnabled(cmmnEnabled); dbSqlSessionFactory.setDmnEnabled(dmnEnabled); dbSqlSessionFactory.setDatabaseTablePrefix(databaseTablePrefix); //hack for the case when schema is defined via databaseTablePrefix parameter and not via databaseSchema parameter if (databaseTablePrefix != null && databaseSchema == null && databaseTablePrefix.contains(".")) { databaseSchema = databaseTablePrefix.split("\\.")[0]; } dbSqlSessionFactory.setDatabaseSchema(databaseSchema); addSessionFactory(dbSqlSessionFactory); addSessionFactory(new DbSqlPersistenceProviderFactory()); } protected void initMigration() { initMigrationInstructionValidators(); initMigrationActivityMatcher(); initMigrationInstructionGenerator(); initMigratingActivityInstanceValidators(); initMigratingTransitionInstanceValidators(); initMigratingCompensationInstanceValidators(); } protected void initMigrationActivityMatcher() { if (migrationActivityMatcher == null) { migrationActivityMatcher = new DefaultMigrationActivityMatcher(); } } protected void initMigrationInstructionGenerator() { if (migrationInstructionGenerator == null) { migrationInstructionGenerator = new DefaultMigrationInstructionGenerator(migrationActivityMatcher); } List<MigrationActivityValidator> migrationActivityValidators = new ArrayList<>(); if (customPreMigrationActivityValidators != null) { migrationActivityValidators.addAll(customPreMigrationActivityValidators); } migrationActivityValidators.addAll(getDefaultMigrationActivityValidators()); if (customPostMigrationActivityValidators != null) { migrationActivityValidators.addAll(customPostMigrationActivityValidators); } migrationInstructionGenerator = migrationInstructionGenerator .migrationActivityValidators(migrationActivityValidators) .migrationInstructionValidators(migrationInstructionValidators); } protected void initMigrationInstructionValidators() { if (migrationInstructionValidators == null) { migrationInstructionValidators = new ArrayList<>(); if (customPreMigrationInstructionValidators != null) { migrationInstructionValidators.addAll(customPreMigrationInstructionValidators); } migrationInstructionValidators.addAll(getDefaultMigrationInstructionValidators()); if (customPostMigrationInstructionValidators != null) { migrationInstructionValidators.addAll(customPostMigrationInstructionValidators); } } } protected void initMigratingActivityInstanceValidators() { if (migratingActivityInstanceValidators == null) { migratingActivityInstanceValidators = new ArrayList<>(); if (customPreMigratingActivityInstanceValidators != null) { migratingActivityInstanceValidators.addAll(customPreMigratingActivityInstanceValidators); } 
migratingActivityInstanceValidators.addAll(getDefaultMigratingActivityInstanceValidators());
      if (customPostMigratingActivityInstanceValidators != null) {
        migratingActivityInstanceValidators.addAll(customPostMigratingActivityInstanceValidators);
      }
    }
  }

  protected void initMigratingTransitionInstanceValidators() {
    if (migratingTransitionInstanceValidators == null) {
      migratingTransitionInstanceValidators = new ArrayList<>();
      migratingTransitionInstanceValidators.addAll(getDefaultMigratingTransitionInstanceValidators());
    }
  }

  protected void initMigratingCompensationInstanceValidators() {
    if (migratingCompensationInstanceValidators == null) {
      migratingCompensationInstanceValidators = new ArrayList<>();
      migratingCompensationInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
      migratingCompensationInstanceValidators.add(new NoUnmappedCompensationStartEventValidator());
    }
  }

  /**
   * When providing a schema and a prefix, the prefix has to be the schema followed by a dot.
   */
  protected void ensurePrefixAndSchemaFitToegether(String prefix, String schema) {
    if (schema == null) {
      return;
    } else if (prefix == null || !prefix.startsWith(schema + ".")) {
      throw new ProcessEngineException("When setting a schema the prefix has to be schema + '.'. Received schema: " + schema + " prefix: " + prefix);
    }
  }

  protected void addSessionFactory(SessionFactory sessionFactory) {
    sessionFactories.put(sessionFactory.getSessionType(), sessionFactory);
  }

  // deployers ////////////////////////////////////////////////////////////////

  protected void initDeployers() {
    if (this.deployers == null) {
      this.deployers = new ArrayList<>();
      if (customPreDeployers != null) {
        this.deployers.addAll(customPreDeployers);
      }
      this.deployers.addAll(getDefaultDeployers());
      if (customPostDeployers != null) {
        this.deployers.addAll(customPostDeployers);
      }
    }
    if (deploymentCache == null) {
      List<Deployer> deployers = new ArrayList<>();
      if (customPreDeployers != null) {
        deployers.addAll(customPreDeployers);
      }
      deployers.addAll(getDefaultDeployers());
      if (customPostDeployers != null) {
        deployers.addAll(customPostDeployers);
      }
      initCacheFactory();
      deploymentCache = new DeploymentCache(cacheFactory, cacheCapacity);
      deploymentCache.setDeployers(deployers);
    }
  }

  protected Collection<?
extends Deployer> getDefaultDeployers() { List<Deployer> defaultDeployers = new ArrayList<>(); BpmnDeployer bpmnDeployer = getBpmnDeployer(); defaultDeployers.add(bpmnDeployer); if (isCmmnEnabled()) { CmmnDeployer cmmnDeployer = getCmmnDeployer(); defaultDeployers.add(cmmnDeployer); } if (isDmnEnabled()) { DecisionRequirementsDefinitionDeployer decisionRequirementsDefinitionDeployer = getDecisionRequirementsDefinitionDeployer(); DecisionDefinitionDeployer decisionDefinitionDeployer = getDecisionDefinitionDeployer(); // the DecisionRequirementsDefinition cacheDeployer must be before the DecisionDefinitionDeployer defaultDeployers.add(decisionRequirementsDefinitionDeployer); defaultDeployers.add(decisionDefinitionDeployer); } return defaultDeployers; } protected BpmnDeployer getBpmnDeployer() { BpmnDeployer bpmnDeployer = new BpmnDeployer(); bpmnDeployer.setExpressionManager(expressionManager); bpmnDeployer.setIdGenerator(idGenerator); if (bpmnParseFactory == null) { bpmnParseFactory = new DefaultBpmnParseFactory(); } BpmnParser bpmnParser = new BpmnParser(expressionManager, bpmnParseFactory); if (preParseListeners != null) { bpmnParser.getParseListeners().addAll(preParseListeners); } bpmnParser.getParseListeners().addAll(getDefaultBPMNParseListeners()); if (postParseListeners != null) { bpmnParser.getParseListeners().addAll(postParseListeners); } bpmnDeployer.setBpmnParser(bpmnParser); return bpmnDeployer; } protected List<BpmnParseListener> getDefaultBPMNParseListeners() { List<BpmnParseListener> defaultListeners = new ArrayList<>(); if (!HistoryLevel.HISTORY_LEVEL_NONE.equals(historyLevel)) { defaultListeners.add(new HistoryParseListener(historyEventProducer)); } if (isMetricsEnabled) { defaultListeners.add(new MetricsBpmnParseListener()); } return defaultListeners; } protected CmmnDeployer getCmmnDeployer() { CmmnDeployer cmmnDeployer = new CmmnDeployer(); cmmnDeployer.setIdGenerator(idGenerator); if (cmmnTransformFactory == null) { cmmnTransformFactory = new DefaultCmmnTransformFactory(); } if (cmmnElementHandlerRegistry == null) { cmmnElementHandlerRegistry = new DefaultCmmnElementHandlerRegistry(); } CmmnTransformer cmmnTransformer = new CmmnTransformer(expressionManager, cmmnElementHandlerRegistry, cmmnTransformFactory); if (customPreCmmnTransformListeners != null) { cmmnTransformer.getTransformListeners().addAll(customPreCmmnTransformListeners); } cmmnTransformer.getTransformListeners().addAll(getDefaultCmmnTransformListeners()); if (customPostCmmnTransformListeners != null) { cmmnTransformer.getTransformListeners().addAll(customPostCmmnTransformListeners); } cmmnDeployer.setTransformer(cmmnTransformer); return cmmnDeployer; } protected List<CmmnTransformListener> getDefaultCmmnTransformListeners() { List<CmmnTransformListener> defaultListener = new ArrayList<>(); if (!HistoryLevel.HISTORY_LEVEL_NONE.equals(historyLevel)) { defaultListener.add(new CmmnHistoryTransformListener(cmmnHistoryEventProducer)); } if (isMetricsEnabled) { defaultListener.add(new MetricsCmmnTransformListener()); } return defaultListener; } protected DecisionDefinitionDeployer getDecisionDefinitionDeployer() { DecisionDefinitionDeployer decisionDefinitionDeployer = new DecisionDefinitionDeployer(); decisionDefinitionDeployer.setIdGenerator(idGenerator); decisionDefinitionDeployer.setTransformer(dmnEngineConfiguration.getTransformer()); return decisionDefinitionDeployer; } protected DecisionRequirementsDefinitionDeployer getDecisionRequirementsDefinitionDeployer() { DecisionRequirementsDefinitionDeployer 
drdDeployer = new DecisionRequirementsDefinitionDeployer(); drdDeployer.setIdGenerator(idGenerator); drdDeployer.setTransformer(dmnEngineConfiguration.getTransformer()); return drdDeployer; } public DmnEngine getDmnEngine() { return dmnEngine; } public void setDmnEngine(DmnEngine dmnEngine) { this.dmnEngine = dmnEngine; } public DefaultDmnEngineConfiguration getDmnEngineConfiguration() { return dmnEngineConfiguration; } public void setDmnEngineConfiguration(DefaultDmnEngineConfiguration dmnEngineConfiguration) { this.dmnEngineConfiguration = dmnEngineConfiguration; } // job executor ///////////////////////////////////////////////////////////// protected void initJobExecutor() { if (jobExecutor == null) { jobExecutor = new DefaultJobExecutor(); } jobHandlers = new HashMap<>(); TimerExecuteNestedActivityJobHandler timerExecuteNestedActivityJobHandler = new TimerExecuteNestedActivityJobHandler(); jobHandlers.put(timerExecuteNestedActivityJobHandler.getType(), timerExecuteNestedActivityJobHandler); TimerCatchIntermediateEventJobHandler timerCatchIntermediateEvent = new TimerCatchIntermediateEventJobHandler(); jobHandlers.put(timerCatchIntermediateEvent.getType(), timerCatchIntermediateEvent); TimerStartEventJobHandler timerStartEvent = new TimerStartEventJobHandler(); jobHandlers.put(timerStartEvent.getType(), timerStartEvent); TimerStartEventSubprocessJobHandler timerStartEventSubprocess = new TimerStartEventSubprocessJobHandler(); jobHandlers.put(timerStartEventSubprocess.getType(), timerStartEventSubprocess); AsyncContinuationJobHandler asyncContinuationJobHandler = new AsyncContinuationJobHandler(); jobHandlers.put(asyncContinuationJobHandler.getType(), asyncContinuationJobHandler); ProcessEventJobHandler processEventJobHandler = new ProcessEventJobHandler(); jobHandlers.put(processEventJobHandler.getType(), processEventJobHandler); TimerSuspendProcessDefinitionHandler suspendProcessDefinitionHandler = new TimerSuspendProcessDefinitionHandler(); jobHandlers.put(suspendProcessDefinitionHandler.getType(), suspendProcessDefinitionHandler); TimerActivateProcessDefinitionHandler activateProcessDefinitionHandler = new TimerActivateProcessDefinitionHandler(); jobHandlers.put(activateProcessDefinitionHandler.getType(), activateProcessDefinitionHandler); TimerSuspendJobDefinitionHandler suspendJobDefinitionHandler = new TimerSuspendJobDefinitionHandler(); jobHandlers.put(suspendJobDefinitionHandler.getType(), suspendJobDefinitionHandler); TimerActivateJobDefinitionHandler activateJobDefinitionHandler = new TimerActivateJobDefinitionHandler(); jobHandlers.put(activateJobDefinitionHandler.getType(), activateJobDefinitionHandler); TimerTaskListenerJobHandler taskListenerJobHandler = new TimerTaskListenerJobHandler(); jobHandlers.put(taskListenerJobHandler.getType(), taskListenerJobHandler); BatchSeedJobHandler batchSeedJobHandler = new BatchSeedJobHandler(); jobHandlers.put(batchSeedJobHandler.getType(), batchSeedJobHandler); BatchMonitorJobHandler batchMonitorJobHandler = new BatchMonitorJobHandler(); jobHandlers.put(batchMonitorJobHandler.getType(), batchMonitorJobHandler); HistoryCleanupJobHandler historyCleanupJobHandler = new HistoryCleanupJobHandler(); jobHandlers.put(historyCleanupJobHandler.getType(), historyCleanupJobHandler); for (JobHandler batchHandler : batchHandlers.values()) { jobHandlers.put(batchHandler.getType(), batchHandler); } // if we have custom job handlers, register them if (getCustomJobHandlers() != null) { for (JobHandler customJobHandler : getCustomJobHandlers()) { 
jobHandlers.put(customJobHandler.getType(), customJobHandler); } } jobExecutor.setAutoActivate(jobExecutorActivate); if (jobExecutor.getRejectedJobsHandler() == null) { if (customRejectedJobsHandler != null) { jobExecutor.setRejectedJobsHandler(customRejectedJobsHandler); } else { jobExecutor.setRejectedJobsHandler(new NotifyAcquisitionRejectedJobsHandler()); } } } protected void initJobProvider() { if (producePrioritizedJobs && jobPriorityProvider == null) { jobPriorityProvider = new DefaultJobPriorityProvider(); } } //external task ///////////////////////////////////////////////////////////// protected void initExternalTaskPriorityProvider() { if (producePrioritizedExternalTasks && externalTaskPriorityProvider == null) { externalTaskPriorityProvider = new DefaultExternalTaskPriorityProvider(); } } // history ////////////////////////////////////////////////////////////////// public void initHistoryLevel() { if (historyLevel != null) { setHistory(historyLevel.getName()); } if (historyLevels == null) { historyLevels = new ArrayList<>(); historyLevels.add(HistoryLevel.HISTORY_LEVEL_NONE); historyLevels.add(HistoryLevel.HISTORY_LEVEL_ACTIVITY); historyLevels.add(HistoryLevel.HISTORY_LEVEL_AUDIT); historyLevels.add(HistoryLevel.HISTORY_LEVEL_FULL); } if (customHistoryLevels != null) { historyLevels.addAll(customHistoryLevels); } if (HISTORY_VARIABLE.equalsIgnoreCase(history)) { historyLevel = HistoryLevel.HISTORY_LEVEL_ACTIVITY; LOG.usingDeprecatedHistoryLevelVariable(); } else { for (HistoryLevel historyLevel : historyLevels) { if (historyLevel.getName().equalsIgnoreCase(history)) { this.historyLevel = historyLevel; } } } // do allow null for history level in case of "auto" if (historyLevel == null && !ProcessEngineConfiguration.HISTORY_AUTO.equalsIgnoreCase(history)) { throw new ProcessEngineException("invalid history level: " + history); } } // id generator ///////////////////////////////////////////////////////////// protected void initIdGenerator() { if (idGenerator == null) { CommandExecutor idGeneratorCommandExecutor = null; if (idGeneratorDataSource != null) { ProcessEngineConfigurationImpl processEngineConfiguration = new StandaloneProcessEngineConfiguration(); processEngineConfiguration.setDataSource(idGeneratorDataSource); processEngineConfiguration.setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_FALSE); processEngineConfiguration.init(); idGeneratorCommandExecutor = processEngineConfiguration.getCommandExecutorTxRequiresNew(); } else if (idGeneratorDataSourceJndiName != null) { ProcessEngineConfigurationImpl processEngineConfiguration = new StandaloneProcessEngineConfiguration(); processEngineConfiguration.setDataSourceJndiName(idGeneratorDataSourceJndiName); processEngineConfiguration.setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_FALSE); processEngineConfiguration.init(); idGeneratorCommandExecutor = processEngineConfiguration.getCommandExecutorTxRequiresNew(); } else { idGeneratorCommandExecutor = commandExecutorTxRequiresNew; } DbIdGenerator dbIdGenerator = new DbIdGenerator(); dbIdGenerator.setIdBlockSize(idBlockSize); dbIdGenerator.setCommandExecutor(idGeneratorCommandExecutor); idGenerator = dbIdGenerator; } } // OTHER //////////////////////////////////////////////////////////////////// protected void initCommandContextFactory() { if (commandContextFactory == null) { commandContextFactory = new CommandContextFactory(); commandContextFactory.setProcessEngineConfiguration(this); } } protected void initTransactionContextFactory() { if (transactionContextFactory == null) { 
transactionContextFactory = new StandaloneTransactionContextFactory(); } } protected void initValueTypeResolver() { if (valueTypeResolver == null) { valueTypeResolver = new ValueTypeResolverImpl(); } } protected void initDefaultCharset() { if (defaultCharset == null) { if (defaultCharsetName == null) { defaultCharsetName = "UTF-8"; } defaultCharset = Charset.forName(defaultCharsetName); } } protected void initMetrics() { if (isMetricsEnabled) { if (metricsRegistry == null) { metricsRegistry = new MetricsRegistry(); } initDefaultMetrics(metricsRegistry); if (dbMetricsReporter == null) { dbMetricsReporter = new DbMetricsReporter(metricsRegistry, commandExecutorTxRequired); } } } protected void initHostName() { if (hostname == null) { if (hostnameProvider == null) { hostnameProvider = new SimpleIpBasedProvider(); } hostname = hostnameProvider.getHostname(this); } } protected void initDefaultMetrics(MetricsRegistry metricsRegistry) { metricsRegistry.createMeter(Metrics.ACTIVTY_INSTANCE_START); metricsRegistry.createMeter(Metrics.ACTIVTY_INSTANCE_END); metricsRegistry.createMeter(Metrics.JOB_ACQUISITION_ATTEMPT); metricsRegistry.createMeter(Metrics.JOB_ACQUIRED_SUCCESS); metricsRegistry.createMeter(Metrics.JOB_ACQUIRED_FAILURE); metricsRegistry.createMeter(Metrics.JOB_SUCCESSFUL); metricsRegistry.createMeter(Metrics.JOB_FAILED); metricsRegistry.createMeter(Metrics.JOB_LOCKED_EXCLUSIVE); metricsRegistry.createMeter(Metrics.JOB_EXECUTION_REJECTED); metricsRegistry.createMeter(Metrics.ROOT_PROCESS_INSTANCE_START); metricsRegistry.createMeter(Metrics.EXECUTED_DECISION_INSTANCES); metricsRegistry.createMeter(Metrics.EXECUTED_DECISION_ELEMENTS); } protected void initTelemetry() { if (telemetryEnabled) { // initialize telemetry reporter } } protected void initSerialization() { if (variableSerializers == null) { variableSerializers = new DefaultVariableSerializers(); if (customPreVariableSerializers != null) { for (TypedValueSerializer<?> customVariableType : customPreVariableSerializers) { variableSerializers.addSerializer(customVariableType); } } // register built-in serializers variableSerializers.addSerializer(new NullValueSerializer()); variableSerializers.addSerializer(new StringValueSerializer()); variableSerializers.addSerializer(new BooleanValueSerializer()); variableSerializers.addSerializer(new ShortValueSerializer()); variableSerializers.addSerializer(new IntegerValueSerializer()); variableSerializers.addSerializer(new LongValueSerlializer()); variableSerializers.addSerializer(new DateValueSerializer()); variableSerializers.addSerializer(new DoubleValueSerializer()); variableSerializers.addSerializer(new ByteArrayValueSerializer()); variableSerializers.addSerializer(new JavaObjectSerializer()); variableSerializers.addSerializer(new FileValueSerializer()); if (customPostVariableSerializers != null) { for (TypedValueSerializer<?> customVariableType : customPostVariableSerializers) { variableSerializers.addSerializer(customVariableType); } } } } protected void initFormEngines() { if (formEngines == null) { formEngines = new HashMap<>(); // html form engine = default form engine FormEngine defaultFormEngine = new HtmlFormEngine(); formEngines.put(null, defaultFormEngine); // default form engine is looked up with null formEngines.put(defaultFormEngine.getName(), defaultFormEngine); FormEngine juelFormEngine = new JuelFormEngine(); formEngines.put(juelFormEngine.getName(), juelFormEngine); } if (customFormEngines != null) { for (FormEngine formEngine : customFormEngines) { 
  protected void initFormTypes() {
    if (formTypes == null) {
      formTypes = new FormTypes();
      formTypes.addFormType(new StringFormType());
      formTypes.addFormType(new LongFormType());
      formTypes.addFormType(new DateFormType("dd/MM/yyyy"));
      formTypes.addFormType(new BooleanFormType());
    }
    if (customFormTypes != null) {
      for (AbstractFormFieldType customFormType : customFormTypes) {
        formTypes.addFormType(customFormType);
      }
    }
  }

  protected void initFormFieldValidators() {
    if (formValidators == null) {
      formValidators = new FormValidators();
      formValidators.addValidator("min", MinValidator.class);
      formValidators.addValidator("max", MaxValidator.class);
      formValidators.addValidator("minlength", MinLengthValidator.class);
      formValidators.addValidator("maxlength", MaxLengthValidator.class);
      formValidators.addValidator("required", RequiredValidator.class);
      formValidators.addValidator("readonly", ReadOnlyValidator.class);
    }
    if (customFormFieldValidators != null) {
      for (Entry<String, Class<? extends FormFieldValidator>> validator : customFormFieldValidators.entrySet()) {
        formValidators.addValidator(validator.getKey(), validator.getValue());
      }
    }
  }

  protected void initScripting() {
    if (resolverFactories == null) {
      resolverFactories = new ArrayList<>();
      resolverFactories.add(new MocksResolverFactory());
      resolverFactories.add(new VariableScopeResolverFactory());
      resolverFactories.add(new BeansResolverFactory());
    }
    if (scriptingEngines == null) {
      scriptingEngines = new ScriptingEngines(new ScriptBindingsFactory(resolverFactories));
      scriptingEngines.setEnableScriptEngineCaching(enableScriptEngineCaching);
    }
    if (scriptFactory == null) {
      scriptFactory = new ScriptFactory();
    }
    if (scriptEnvResolvers == null) {
      scriptEnvResolvers = new ArrayList<>();
    }
    if (scriptingEnvironment == null) {
      scriptingEnvironment = new ScriptingEnvironment(scriptFactory, scriptEnvResolvers, scriptingEngines);
    }
  }

  protected void initDmnEngine() {
    if (dmnEngine == null) {
      if (dmnEngineConfiguration == null) {
        dmnEngineConfiguration = (DefaultDmnEngineConfiguration) DmnEngineConfiguration.createDefaultDmnEngineConfiguration();
      }
      dmnEngineConfiguration = new DmnEngineConfigurationBuilder(dmnEngineConfiguration)
          .dmnHistoryEventProducer(dmnHistoryEventProducer)
          .scriptEngineResolver(scriptingEngines)
          .expressionManager(expressionManager)
          .feelCustomFunctionProviders(dmnFeelCustomFunctionProviders)
          .enableFeelLegacyBehavior(dmnFeelEnableLegacyBehavior)
          .build();
      dmnEngine = dmnEngineConfiguration.buildEngine();
    } else if (dmnEngineConfiguration == null) {
      dmnEngineConfiguration = (DefaultDmnEngineConfiguration) dmnEngine.getConfiguration();
    }
  }

  protected void initExpressionManager() {
    if (expressionManager == null) {
      expressionManager = new ExpressionManager(beans);
    }
    // add function mapper for command context (e.g. currentUser(), currentUserGroups())
    expressionManager.addFunctionMapper(new CommandContextFunctionMapper());
    // add function mapper for date time (e.g. now(), dateTime())
    expressionManager.addFunctionMapper(new DateTimeFunctionMapper());
  }

  protected void initBusinessCalendarManager() {
    if (businessCalendarManager == null) {
      MapBusinessCalendarManager mapBusinessCalendarManager = new MapBusinessCalendarManager();
      mapBusinessCalendarManager.addBusinessCalendar(DurationBusinessCalendar.NAME, new DurationBusinessCalendar());
      mapBusinessCalendarManager.addBusinessCalendar(DueDateBusinessCalendar.NAME, new DueDateBusinessCalendar());
      mapBusinessCalendarManager.addBusinessCalendar(CycleBusinessCalendar.NAME, new CycleBusinessCalendar());
      businessCalendarManager = mapBusinessCalendarManager;
    }
  }
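  /*
   * Illustrative sketch: initFormFieldValidators() above merges custom validators into the
   * built-in set, keyed by the name used in form field definitions. "IbanValidator" is a
   * hypothetical class that would implement FormFieldValidator.
   *
   *   Map<String, Class<? extends FormFieldValidator>> validators = new HashMap<>();
   *   validators.put("iban", IbanValidator.class); // hypothetical validator
   *   config.setCustomFormFieldValidators(validators);
   */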
  protected void initDelegateInterceptor() {
    if (delegateInterceptor == null) {
      delegateInterceptor = new DefaultDelegateInterceptor();
    }
  }

  protected void initEventHandlers() {
    if (eventHandlers == null) {
      eventHandlers = new HashMap<>();

      SignalEventHandler signalEventHandler = new SignalEventHandler();
      eventHandlers.put(signalEventHandler.getEventHandlerType(), signalEventHandler);

      CompensationEventHandler compensationEventHandler = new CompensationEventHandler();
      eventHandlers.put(compensationEventHandler.getEventHandlerType(), compensationEventHandler);

      EventHandler messageEventHandler = new EventHandlerImpl(EventType.MESSAGE);
      eventHandlers.put(messageEventHandler.getEventHandlerType(), messageEventHandler);

      EventHandler conditionalEventHandler = new ConditionalEventHandler();
      eventHandlers.put(conditionalEventHandler.getEventHandlerType(), conditionalEventHandler);
    }
    if (customEventHandlers != null) {
      for (EventHandler eventHandler : customEventHandlers) {
        eventHandlers.put(eventHandler.getEventHandlerType(), eventHandler);
      }
    }
  }

  protected void initCommandCheckers() {
    if (commandCheckers == null) {
      commandCheckers = new ArrayList<>();

      // add the default command checkers
      commandCheckers.add(new TenantCommandChecker());
      commandCheckers.add(new AuthorizationCommandChecker());
    }
  }

  // JPA //////////////////////////////////////////////////////////////////////

  protected void initJpa() {
    if (jpaPersistenceUnitName != null) {
      jpaEntityManagerFactory = JpaHelper.createEntityManagerFactory(jpaPersistenceUnitName);
    }
    if (jpaEntityManagerFactory != null) {
      sessionFactories.put(EntityManagerSession.class,
          new EntityManagerSessionFactory(jpaEntityManagerFactory, jpaHandleTransaction, jpaCloseEntityManager));
      JPAVariableSerializer jpaType = (JPAVariableSerializer) variableSerializers.getSerializerByName(JPAVariableSerializer.NAME);
      // Add JPA-type
      if (jpaType == null) {
        // We try adding the variable right after byte serializer, if available
        int serializableIndex = variableSerializers.getSerializerIndexByName(ValueType.BYTES.getName());
        if (serializableIndex > -1) {
          variableSerializers.addSerializer(new JPAVariableSerializer(), serializableIndex);
        } else {
          variableSerializers.addSerializer(new JPAVariableSerializer());
        }
      }
    }
  }

  protected void initBeans() {
    if (beans == null) {
      beans = new HashMap<>();
    }
  }

  protected void initArtifactFactory() {
    if (artifactFactory == null) {
      artifactFactory = new DefaultArtifactFactory();
    }
  }

  protected void initProcessApplicationManager() {
    if (processApplicationManager == null) {
      processApplicationManager = new ProcessApplicationManager();
    }
  }

  // correlation handler //////////////////////////////////////////////////////

  protected void initCorrelationHandler() {
    if (correlationHandler == null) {
      correlationHandler = new DefaultCorrelationHandler();
    }
  }

  // condition handler //////////////////////////////////////////////////////

  protected void initConditionHandler() {
    if (conditionHandler == null) {
      conditionHandler = new DefaultConditionHandler();
    }
  }

  // deployment handler //////////////////////////////////////////////////////

  protected void initDeploymentHandlerFactory() {
    if (deploymentHandlerFactory == null) {
      deploymentHandlerFactory = new DefaultDeploymentHandlerFactory();
    }
  }
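  /*
   * Illustrative sketch: initCommandCheckers() above only installs the tenant and
   * authorization checkers when the list is null, so a caller can supply an extended list
   * before engine construction. "AuditCommandChecker" is hypothetical.
   *
   *   List<CommandChecker> checkers = new ArrayList<>();
   *   checkers.add(new TenantCommandChecker());
   *   checkers.add(new AuthorizationCommandChecker());
   *   checkers.add(new AuditCommandChecker()); // hypothetical extra check
   *   config.setCommandCheckers(checkers);
   */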
  // history handlers /////////////////////////////////////////////////////

  protected void initHistoryEventProducer() {
    if (historyEventProducer == null) {
      historyEventProducer = new CacheAwareHistoryEventProducer();
    }
  }

  protected void initCmmnHistoryEventProducer() {
    if (cmmnHistoryEventProducer == null) {
      cmmnHistoryEventProducer = new CacheAwareCmmnHistoryEventProducer();
    }
  }

  protected void initDmnHistoryEventProducer() {
    if (dmnHistoryEventProducer == null) {
      dmnHistoryEventProducer = new DefaultDmnHistoryEventProducer();
    }
  }

  protected void initHistoryEventHandler() {
    if (historyEventHandler == null) {
      if (enableDefaultDbHistoryEventHandler) {
        historyEventHandler = new CompositeDbHistoryEventHandler(customHistoryEventHandlers);
      } else {
        historyEventHandler = new CompositeHistoryEventHandler(customHistoryEventHandlers);
      }
    }
  }

  // password digest //////////////////////////////////////////////////////////

  protected void initPasswordDigest() {
    if (saltGenerator == null) {
      saltGenerator = new Default16ByteSaltGenerator();
    }
    if (passwordEncryptor == null) {
      passwordEncryptor = new Sha512HashDigest();
    }
    if (customPasswordChecker == null) {
      customPasswordChecker = Collections.emptyList();
    }
    if (passwordManager == null) {
      passwordManager = new PasswordManager(passwordEncryptor, customPasswordChecker);
    }
  }

  public void initPasswordPolicy() {
    if (passwordPolicy == null && enablePasswordPolicy) {
      passwordPolicy = new DefaultPasswordPolicyImpl();
    }
  }

  protected void initDeploymentRegistration() {
    if (registeredDeployments == null) {
      registeredDeployments = new CopyOnWriteArraySet<>();
    }
  }

  // cache factory //////////////////////////////////////////////////////////

  protected void initCacheFactory() {
    if (cacheFactory == null) {
      cacheFactory = new DefaultCacheFactory();
    }
  }

  // resource authorization provider //////////////////////////////////////////

  protected void initResourceAuthorizationProvider() {
    if (resourceAuthorizationProvider == null) {
      resourceAuthorizationProvider = new DefaultAuthorizationProvider();
    }
  }

  protected void initPermissionProvider() {
    if (permissionProvider == null) {
      permissionProvider = new DefaultPermissionProvider();
    }
  }

  protected void initDefaultUserPermissionForTask() {
    if (defaultUserPermissionForTask == null) {
      if (Permissions.UPDATE.getName().equals(defaultUserPermissionNameForTask)) {
        defaultUserPermissionForTask = Permissions.UPDATE;
      } else if (Permissions.TASK_WORK.getName().equals(defaultUserPermissionNameForTask)) {
        defaultUserPermissionForTask = Permissions.TASK_WORK;
      } else {
        throw LOG.invalidConfigDefaultUserPermissionNameForTask(defaultUserPermissionNameForTask,
            new String[]{Permissions.UPDATE.getName(), Permissions.TASK_WORK.getName()});
      }
    }
  }

  protected void initAdminUser() {
    if (adminUsers == null) {
      adminUsers = new ArrayList<>();
    }
  }

  protected void initAdminGroups() {
    if (adminGroups == null) {
      adminGroups = new ArrayList<>();
    }
    if (adminGroups.isEmpty() || !(adminGroups.contains(Groups.CAMUNDA_ADMIN))) {
      adminGroups.add(Groups.CAMUNDA_ADMIN);
    }
  }

  // getters and setters //////////////////////////////////////////////////////

  @Override
  public String getProcessEngineName() { return processEngineName; }

  public HistoryLevel getHistoryLevel() { return historyLevel; }
  public void setHistoryLevel(HistoryLevel historyLevel) { this.historyLevel = historyLevel; }

  public HistoryLevel getDefaultHistoryLevel() {
    if (historyLevels != null) {
      for (HistoryLevel historyLevel : historyLevels) {
        if (HISTORY_DEFAULT != null && HISTORY_DEFAULT.equalsIgnoreCase(historyLevel.getName())) {
          return historyLevel;
        }
      }
    }
    return null;
  }

  @Override
  public ProcessEngineConfigurationImpl setProcessEngineName(String processEngineName) { this.processEngineName = processEngineName; return this; }
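  /*
   * Illustrative sketch: initPasswordDigest() above only creates defaults for unset fields,
   * so a different hash digest or additional password checkers can be plugged in before the
   * engine is built. Sha512HashDigest is the default shown above; the empty checker list is
   * just for illustration.
   *
   *   config.setPasswordEncryptor(new Sha512HashDigest());
   *   config.setCustomPasswordChecker(Collections.emptyList());
   */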
  public List<CommandInterceptor> getCustomPreCommandInterceptorsTxRequired() { return customPreCommandInterceptorsTxRequired; }
  public ProcessEngineConfigurationImpl setCustomPreCommandInterceptorsTxRequired(List<CommandInterceptor> customPreCommandInterceptorsTxRequired) { this.customPreCommandInterceptorsTxRequired = customPreCommandInterceptorsTxRequired; return this; }
  public List<CommandInterceptor> getCustomPostCommandInterceptorsTxRequired() { return customPostCommandInterceptorsTxRequired; }
  public ProcessEngineConfigurationImpl setCustomPostCommandInterceptorsTxRequired(List<CommandInterceptor> customPostCommandInterceptorsTxRequired) { this.customPostCommandInterceptorsTxRequired = customPostCommandInterceptorsTxRequired; return this; }
  public List<CommandInterceptor> getCommandInterceptorsTxRequired() { return commandInterceptorsTxRequired; }
  public ProcessEngineConfigurationImpl setCommandInterceptorsTxRequired(List<CommandInterceptor> commandInterceptorsTxRequired) { this.commandInterceptorsTxRequired = commandInterceptorsTxRequired; return this; }
  public CommandExecutor getCommandExecutorTxRequired() { return commandExecutorTxRequired; }
  public ProcessEngineConfigurationImpl setCommandExecutorTxRequired(CommandExecutor commandExecutorTxRequired) { this.commandExecutorTxRequired = commandExecutorTxRequired; return this; }
  public List<CommandInterceptor> getCustomPreCommandInterceptorsTxRequiresNew() { return customPreCommandInterceptorsTxRequiresNew; }
  public ProcessEngineConfigurationImpl setCustomPreCommandInterceptorsTxRequiresNew(List<CommandInterceptor> customPreCommandInterceptorsTxRequiresNew) { this.customPreCommandInterceptorsTxRequiresNew = customPreCommandInterceptorsTxRequiresNew; return this; }
  public List<CommandInterceptor> getCustomPostCommandInterceptorsTxRequiresNew() { return customPostCommandInterceptorsTxRequiresNew; }
  public ProcessEngineConfigurationImpl setCustomPostCommandInterceptorsTxRequiresNew(List<CommandInterceptor> customPostCommandInterceptorsTxRequiresNew) { this.customPostCommandInterceptorsTxRequiresNew = customPostCommandInterceptorsTxRequiresNew; return this; }
  public List<CommandInterceptor> getCommandInterceptorsTxRequiresNew() { return commandInterceptorsTxRequiresNew; }
  public ProcessEngineConfigurationImpl setCommandInterceptorsTxRequiresNew(List<CommandInterceptor> commandInterceptorsTxRequiresNew) { this.commandInterceptorsTxRequiresNew = commandInterceptorsTxRequiresNew; return this; }
  public CommandExecutor getCommandExecutorTxRequiresNew() { return commandExecutorTxRequiresNew; }
  public ProcessEngineConfigurationImpl setCommandExecutorTxRequiresNew(CommandExecutor commandExecutorTxRequiresNew) { this.commandExecutorTxRequiresNew = commandExecutorTxRequiresNew; return this; }
  public RepositoryService getRepositoryService() { return repositoryService; }
  public ProcessEngineConfigurationImpl setRepositoryService(RepositoryService repositoryService) { this.repositoryService = repositoryService; return this; }
  public RuntimeService getRuntimeService() { return runtimeService; }
  public ProcessEngineConfigurationImpl setRuntimeService(RuntimeService runtimeService) { this.runtimeService = runtimeService; return this; }
  public HistoryService getHistoryService() { return historyService; }
  public ProcessEngineConfigurationImpl setHistoryService(HistoryService historyService) { this.historyService = historyService; return this; }
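  /*
   * Illustrative sketch: the "custom pre" interceptor lists above are consulted when the
   * command interceptor chains are assembled, so a timing or logging interceptor can wrap
   * every engine command. The subclass below is hypothetical and assumes CommandInterceptor
   * exposes a "next" link to delegate to, as in this codebase.
   *
   *   public class TimingInterceptor extends CommandInterceptor {
   *     public <T> T execute(Command<T> command) {
   *       long start = System.currentTimeMillis();
   *       try {
   *         return next.execute(command); // delegate down the chain
   *       } finally {
   *         System.out.println("command took " + (System.currentTimeMillis() - start) + " ms");
   *       }
   *     }
   *   }
   *
   *   config.setCustomPreCommandInterceptorsTxRequired(
   *       Collections.<CommandInterceptor>singletonList(new TimingInterceptor()));
   */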
  public IdentityService getIdentityService() { return identityService; }
  public ProcessEngineConfigurationImpl setIdentityService(IdentityService identityService) { this.identityService = identityService; return this; }
  public TaskService getTaskService() { return taskService; }
  public ProcessEngineConfigurationImpl setTaskService(TaskService taskService) { this.taskService = taskService; return this; }
  public FormService getFormService() { return formService; }
  public ProcessEngineConfigurationImpl setFormService(FormService formService) { this.formService = formService; return this; }
  public ManagementService getManagementService() { return managementService; }
  public AuthorizationService getAuthorizationService() { return authorizationService; }
  public ProcessEngineConfigurationImpl setManagementService(ManagementService managementService) { this.managementService = managementService; return this; }
  public CaseService getCaseService() { return caseService; }
  public void setCaseService(CaseService caseService) { this.caseService = caseService; }
  public FilterService getFilterService() { return filterService; }
  public void setFilterService(FilterService filterService) { this.filterService = filterService; }
  public ExternalTaskService getExternalTaskService() { return externalTaskService; }
  public void setExternalTaskService(ExternalTaskService externalTaskService) { this.externalTaskService = externalTaskService; }
  public DecisionService getDecisionService() { return decisionService; }
  public OptimizeService getOptimizeService() { return optimizeService; }
  public void setDecisionService(DecisionService decisionService) { this.decisionService = decisionService; }
  public Map<Class<?>, SessionFactory> getSessionFactories() { return sessionFactories; }
  public ProcessEngineConfigurationImpl setSessionFactories(Map<Class<?>, SessionFactory> sessionFactories) { this.sessionFactories = sessionFactories; return this; }
  public List<Deployer> getDeployers() { return deployers; }
  public ProcessEngineConfigurationImpl setDeployers(List<Deployer> deployers) { this.deployers = deployers; return this; }
  public JobExecutor getJobExecutor() { return jobExecutor; }
  public ProcessEngineConfigurationImpl setJobExecutor(JobExecutor jobExecutor) { this.jobExecutor = jobExecutor; return this; }
  public PriorityProvider<JobDeclaration<?, ?>> getJobPriorityProvider() { return jobPriorityProvider; }
  public void setJobPriorityProvider(PriorityProvider<JobDeclaration<?, ?>> jobPriorityProvider) { this.jobPriorityProvider = jobPriorityProvider; }
  public PriorityProvider<ExternalTaskActivityBehavior> getExternalTaskPriorityProvider() { return externalTaskPriorityProvider; }
  public void setExternalTaskPriorityProvider(PriorityProvider<ExternalTaskActivityBehavior> externalTaskPriorityProvider) { this.externalTaskPriorityProvider = externalTaskPriorityProvider; }
  public IdGenerator getIdGenerator() { return idGenerator; }
  public ProcessEngineConfigurationImpl setIdGenerator(IdGenerator idGenerator) { this.idGenerator = idGenerator; return this; }
  public String getWsSyncFactoryClassName() { return wsSyncFactoryClassName; }
  public ProcessEngineConfigurationImpl setWsSyncFactoryClassName(String wsSyncFactoryClassName) { this.wsSyncFactoryClassName = wsSyncFactoryClassName; return this; }
  public Map<String, FormEngine> getFormEngines() { return formEngines; }
  public ProcessEngineConfigurationImpl setFormEngines(Map<String, FormEngine> formEngines) { this.formEngines = formEngines; return this; }
  public FormTypes getFormTypes() { return formTypes; }
  public ProcessEngineConfigurationImpl setFormTypes(FormTypes formTypes) { this.formTypes = formTypes; return this; }
  public ScriptingEngines getScriptingEngines() { return scriptingEngines; }
  public ProcessEngineConfigurationImpl setScriptingEngines(ScriptingEngines scriptingEngines) { this.scriptingEngines = scriptingEngines; return this; }
  public VariableSerializers getVariableSerializers() { return variableSerializers; }
  public VariableSerializerFactory getFallbackSerializerFactory() { return fallbackSerializerFactory; }
  public void setFallbackSerializerFactory(VariableSerializerFactory fallbackSerializerFactory) { this.fallbackSerializerFactory = fallbackSerializerFactory; }
  public ProcessEngineConfigurationImpl setVariableTypes(VariableSerializers variableSerializers) { this.variableSerializers = variableSerializers; return this; }
  public ExpressionManager getExpressionManager() { return expressionManager; }
  public ProcessEngineConfigurationImpl setExpressionManager(ExpressionManager expressionManager) { this.expressionManager = expressionManager; return this; }
  public BusinessCalendarManager getBusinessCalendarManager() { return businessCalendarManager; }
  public ProcessEngineConfigurationImpl setBusinessCalendarManager(BusinessCalendarManager businessCalendarManager) { this.businessCalendarManager = businessCalendarManager; return this; }
  public CommandContextFactory getCommandContextFactory() { return commandContextFactory; }
  public ProcessEngineConfigurationImpl setCommandContextFactory(CommandContextFactory commandContextFactory) { this.commandContextFactory = commandContextFactory; return this; }
  public TransactionContextFactory getTransactionContextFactory() { return transactionContextFactory; }
  public ProcessEngineConfigurationImpl setTransactionContextFactory(TransactionContextFactory transactionContextFactory) { this.transactionContextFactory = transactionContextFactory; return this; }
  public List<Deployer> getCustomPreDeployers() { return customPreDeployers; }
  public ProcessEngineConfigurationImpl setCustomPreDeployers(List<Deployer> customPreDeployers) { this.customPreDeployers = customPreDeployers; return this; }
  public List<Deployer> getCustomPostDeployers() { return customPostDeployers; }
  public ProcessEngineConfigurationImpl setCustomPostDeployers(List<Deployer> customPostDeployers) { this.customPostDeployers = customPostDeployers; return this; }
  public void setCacheFactory(CacheFactory cacheFactory) { this.cacheFactory = cacheFactory; }
  public void setCacheCapacity(int cacheCapacity) { this.cacheCapacity = cacheCapacity; }
  public void setEnableFetchProcessDefinitionDescription(boolean enableFetchProcessDefinitionDescription) { this.enableFetchProcessDefinitionDescription = enableFetchProcessDefinitionDescription; }
  public boolean getEnableFetchProcessDefinitionDescription() { return this.enableFetchProcessDefinitionDescription; }
  public Permission getDefaultUserPermissionForTask() { return defaultUserPermissionForTask; }
  public ProcessEngineConfigurationImpl setDefaultUserPermissionForTask(Permission defaultUserPermissionForTask) { this.defaultUserPermissionForTask = defaultUserPermissionForTask; return this; }
  public ProcessEngineConfigurationImpl setEnableHistoricInstancePermissions(boolean enable) { this.enableHistoricInstancePermissions = enable; return this; }
  public boolean isEnableHistoricInstancePermissions() { return enableHistoricInstancePermissions; }
  public Map<String, JobHandler> getJobHandlers() { return jobHandlers; }
  public ProcessEngineConfigurationImpl setJobHandlers(Map<String, JobHandler> jobHandlers) { this.jobHandlers = jobHandlers; return this; }
  public SqlSessionFactory getSqlSessionFactory() { return sqlSessionFactory; }
  public ProcessEngineConfigurationImpl setSqlSessionFactory(SqlSessionFactory sqlSessionFactory) { this.sqlSessionFactory = sqlSessionFactory; return this; }
  public DbSqlSessionFactory getDbSqlSessionFactory() { return dbSqlSessionFactory; }
  public ProcessEngineConfigurationImpl setDbSqlSessionFactory(DbSqlSessionFactory dbSqlSessionFactory) { this.dbSqlSessionFactory = dbSqlSessionFactory; return this; }
  public TransactionFactory getTransactionFactory() { return transactionFactory; }
  public ProcessEngineConfigurationImpl setTransactionFactory(TransactionFactory transactionFactory) { this.transactionFactory = transactionFactory; return this; }
  public List<SessionFactory> getCustomSessionFactories() { return customSessionFactories; }
  public ProcessEngineConfigurationImpl setCustomSessionFactories(List<SessionFactory> customSessionFactories) { this.customSessionFactories = customSessionFactories; return this; }
  public List<JobHandler> getCustomJobHandlers() { return customJobHandlers; }
  public ProcessEngineConfigurationImpl setCustomJobHandlers(List<JobHandler> customJobHandlers) { this.customJobHandlers = customJobHandlers; return this; }
  public List<FormEngine> getCustomFormEngines() { return customFormEngines; }
  public ProcessEngineConfigurationImpl setCustomFormEngines(List<FormEngine> customFormEngines) { this.customFormEngines = customFormEngines; return this; }
  public List<AbstractFormFieldType> getCustomFormTypes() { return customFormTypes; }
  public ProcessEngineConfigurationImpl setCustomFormTypes(List<AbstractFormFieldType> customFormTypes) { this.customFormTypes = customFormTypes; return this; }
  public List<TypedValueSerializer> getCustomPreVariableSerializers() { return customPreVariableSerializers; }
  public ProcessEngineConfigurationImpl setCustomPreVariableSerializers(List<TypedValueSerializer> customPreVariableTypes) { this.customPreVariableSerializers = customPreVariableTypes; return this; }
  public List<TypedValueSerializer> getCustomPostVariableSerializers() { return customPostVariableSerializers; }
  public ProcessEngineConfigurationImpl setCustomPostVariableSerializers(List<TypedValueSerializer> customPostVariableTypes) { this.customPostVariableSerializers = customPostVariableTypes; return this; }
  public List<BpmnParseListener> getCustomPreBPMNParseListeners() { return preParseListeners; }
  public void setCustomPreBPMNParseListeners(List<BpmnParseListener> preParseListeners) { this.preParseListeners = preParseListeners; }
  public List<BpmnParseListener> getCustomPostBPMNParseListeners() { return postParseListeners; }
  public void setCustomPostBPMNParseListeners(List<BpmnParseListener> postParseListeners) { this.postParseListeners = postParseListeners; }

  /**
   * @deprecated use {@link #getCustomPreBPMNParseListeners} instead.
   */
  @Deprecated
  public List<BpmnParseListener> getPreParseListeners() { return preParseListeners; }

  /**
   * @deprecated use {@link #setCustomPreBPMNParseListeners} instead.
   */
  @Deprecated
  public void setPreParseListeners(List<BpmnParseListener> preParseListeners) { this.preParseListeners = preParseListeners; }

  /**
   * @deprecated use {@link #getCustomPostBPMNParseListeners} instead.
   */
  @Deprecated
  public List<BpmnParseListener> getPostParseListeners() { return postParseListeners; }

  /**
   * @deprecated use {@link #setCustomPostBPMNParseListeners} instead.
   */
  @Deprecated
  public void setPostParseListeners(List<BpmnParseListener> postParseListeners) { this.postParseListeners = postParseListeners; }

  public List<CmmnTransformListener> getCustomPreCmmnTransformListeners() { return customPreCmmnTransformListeners; }
  public void setCustomPreCmmnTransformListeners(List<CmmnTransformListener> customPreCmmnTransformListeners) { this.customPreCmmnTransformListeners = customPreCmmnTransformListeners; }
  public List<CmmnTransformListener> getCustomPostCmmnTransformListeners() { return customPostCmmnTransformListeners; }
  public void setCustomPostCmmnTransformListeners(List<CmmnTransformListener> customPostCmmnTransformListeners) { this.customPostCmmnTransformListeners = customPostCmmnTransformListeners; }
  public Map<Object, Object> getBeans() { return beans; }
  public void setBeans(Map<Object, Object> beans) { this.beans = beans; }

  @Override
  public ProcessEngineConfigurationImpl setClassLoader(ClassLoader classLoader) { super.setClassLoader(classLoader); return this; }
  @Override
  public ProcessEngineConfigurationImpl setDatabaseType(String databaseType) { super.setDatabaseType(databaseType); return this; }
  @Override
  public ProcessEngineConfigurationImpl setDataSource(DataSource dataSource) { super.setDataSource(dataSource); return this; }
  @Override
  public ProcessEngineConfigurationImpl setDatabaseSchemaUpdate(String databaseSchemaUpdate) { super.setDatabaseSchemaUpdate(databaseSchemaUpdate); return this; }
  @Override
  public ProcessEngineConfigurationImpl setHistory(String history) { super.setHistory(history); return this; }
  @Override
  public ProcessEngineConfigurationImpl setIdBlockSize(int idBlockSize) { super.setIdBlockSize(idBlockSize); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcDriver(String jdbcDriver) { super.setJdbcDriver(jdbcDriver); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcPassword(String jdbcPassword) { super.setJdbcPassword(jdbcPassword); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcUrl(String jdbcUrl) { super.setJdbcUrl(jdbcUrl); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcUsername(String jdbcUsername) { super.setJdbcUsername(jdbcUsername); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJobExecutorActivate(boolean jobExecutorActivate) { super.setJobExecutorActivate(jobExecutorActivate); return this; }
  @Override
  public ProcessEngineConfigurationImpl setMailServerDefaultFrom(String mailServerDefaultFrom) { super.setMailServerDefaultFrom(mailServerDefaultFrom); return this; }
  @Override
  public ProcessEngineConfigurationImpl setMailServerHost(String mailServerHost) { super.setMailServerHost(mailServerHost); return this; }
  @Override
  public ProcessEngineConfigurationImpl setMailServerPassword(String mailServerPassword) { super.setMailServerPassword(mailServerPassword); return this; }
  @Override
  public ProcessEngineConfigurationImpl setMailServerPort(int mailServerPort) { super.setMailServerPort(mailServerPort); return this; }
  @Override
  public ProcessEngineConfigurationImpl setMailServerUseTLS(boolean useTLS) { super.setMailServerUseTLS(useTLS); return this; }
  @Override
  public ProcessEngineConfigurationImpl setMailServerUsername(String mailServerUsername) { super.setMailServerUsername(mailServerUsername); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcMaxActiveConnections(int jdbcMaxActiveConnections) { super.setJdbcMaxActiveConnections(jdbcMaxActiveConnections); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcMaxCheckoutTime(int jdbcMaxCheckoutTime) { super.setJdbcMaxCheckoutTime(jdbcMaxCheckoutTime); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcMaxIdleConnections(int jdbcMaxIdleConnections) { super.setJdbcMaxIdleConnections(jdbcMaxIdleConnections); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcMaxWaitTime(int jdbcMaxWaitTime) { super.setJdbcMaxWaitTime(jdbcMaxWaitTime); return this; }
  @Override
  public ProcessEngineConfigurationImpl setTransactionsExternallyManaged(boolean transactionsExternallyManaged) { super.setTransactionsExternallyManaged(transactionsExternallyManaged); return this; }
  @Override
  public ProcessEngineConfigurationImpl setJpaEntityManagerFactory(Object jpaEntityManagerFactory) { this.jpaEntityManagerFactory = jpaEntityManagerFactory; return this; }
  @Override
  public ProcessEngineConfigurationImpl setJpaHandleTransaction(boolean jpaHandleTransaction) { this.jpaHandleTransaction = jpaHandleTransaction; return this; }
  @Override
  public ProcessEngineConfigurationImpl setJpaCloseEntityManager(boolean jpaCloseEntityManager) { this.jpaCloseEntityManager = jpaCloseEntityManager; return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcPingEnabled(boolean jdbcPingEnabled) { this.jdbcPingEnabled = jdbcPingEnabled; return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcPingQuery(String jdbcPingQuery) { this.jdbcPingQuery = jdbcPingQuery; return this; }
  @Override
  public ProcessEngineConfigurationImpl setJdbcPingConnectionNotUsedFor(int jdbcPingNotUsedFor) { this.jdbcPingConnectionNotUsedFor = jdbcPingNotUsedFor; return this; }
  public boolean isDbIdentityUsed() { return isDbIdentityUsed; }
  public void setDbIdentityUsed(boolean isDbIdentityUsed) { this.isDbIdentityUsed = isDbIdentityUsed; }
  public boolean isDbHistoryUsed() { return isDbHistoryUsed; }
  public void setDbHistoryUsed(boolean isDbHistoryUsed) { this.isDbHistoryUsed = isDbHistoryUsed; }
  public List<ResolverFactory> getResolverFactories() { return resolverFactories; }
  public void setResolverFactories(List<ResolverFactory> resolverFactories) { this.resolverFactories = resolverFactories; }
  public DeploymentCache getDeploymentCache() { return deploymentCache; }
  public void setDeploymentCache(DeploymentCache deploymentCache) { this.deploymentCache = deploymentCache; }
  public DeploymentHandlerFactory getDeploymentHandlerFactory() { return deploymentHandlerFactory; }
  public ProcessEngineConfigurationImpl setDeploymentHandlerFactory(DeploymentHandlerFactory deploymentHandlerFactory) { this.deploymentHandlerFactory = deploymentHandlerFactory; return this; }
  public ProcessEngineConfigurationImpl setDelegateInterceptor(DelegateInterceptor delegateInterceptor) { this.delegateInterceptor = delegateInterceptor; return this; }
  public DelegateInterceptor getDelegateInterceptor() { return delegateInterceptor; }
  public RejectedJobsHandler getCustomRejectedJobsHandler() { return customRejectedJobsHandler; }
  public ProcessEngineConfigurationImpl setCustomRejectedJobsHandler(RejectedJobsHandler customRejectedJobsHandler) { this.customRejectedJobsHandler = customRejectedJobsHandler; return this; }
  public EventHandler getEventHandler(String eventType) { return eventHandlers.get(eventType); }
  public void setEventHandlers(Map<String, EventHandler> eventHandlers) { this.eventHandlers = eventHandlers; }
  public Map<String, EventHandler> getEventHandlers() { return eventHandlers; }
  public List<EventHandler> getCustomEventHandlers() { return customEventHandlers; }
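  /*
   * Illustrative sketch: because the overridden setters above return the concrete
   * configuration type, an engine can be configured as one fluent chain. The JDBC URL below
   * is a placeholder.
   *
   *   ProcessEngine engine = new StandaloneProcessEngineConfiguration()
   *       .setJdbcUrl("jdbc:h2:mem:example")              // placeholder URL
   *       .setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_TRUE)
   *       .setJobExecutorActivate(false)
   *       .buildProcessEngine();
   */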
  public void setCustomEventHandlers(List<EventHandler> customEventHandlers) { this.customEventHandlers = customEventHandlers; }
  public FailedJobCommandFactory getFailedJobCommandFactory() { return failedJobCommandFactory; }
  public ProcessEngineConfigurationImpl setFailedJobCommandFactory(FailedJobCommandFactory failedJobCommandFactory) { this.failedJobCommandFactory = failedJobCommandFactory; return this; }

  /**
   * Allows configuring a database table prefix which is used for all runtime operations of the process engine.
   * For example, if you specify the prefix 'PRE1.', the process engine will query for executions in a table named
   * 'PRE1.ACT_RU_EXECUTION'.
   * <p>
   * <strong>NOTE: the prefix is not respected by automatic database schema management. If you use
   * {@link ProcessEngineConfiguration#DB_SCHEMA_UPDATE_CREATE_DROP}
   * or {@link ProcessEngineConfiguration#DB_SCHEMA_UPDATE_TRUE}, the process engine will create the database tables
   * using the default names, regardless of the prefix configured here.</strong>
   *
   * @since 5.9
   */
  public ProcessEngineConfiguration setDatabaseTablePrefix(String databaseTablePrefix) { this.databaseTablePrefix = databaseTablePrefix; return this; }

  public String getDatabaseTablePrefix() { return databaseTablePrefix; }
  public boolean isCreateDiagramOnDeploy() { return isCreateDiagramOnDeploy; }
  public ProcessEngineConfiguration setCreateDiagramOnDeploy(boolean createDiagramOnDeploy) { this.isCreateDiagramOnDeploy = createDiagramOnDeploy; return this; }
  public String getDatabaseSchema() { return databaseSchema; }
  public void setDatabaseSchema(String databaseSchema) { this.databaseSchema = databaseSchema; }
  public DataSource getIdGeneratorDataSource() { return idGeneratorDataSource; }
  public void setIdGeneratorDataSource(DataSource idGeneratorDataSource) { this.idGeneratorDataSource = idGeneratorDataSource; }
  public String getIdGeneratorDataSourceJndiName() { return idGeneratorDataSourceJndiName; }
  public void setIdGeneratorDataSourceJndiName(String idGeneratorDataSourceJndiName) { this.idGeneratorDataSourceJndiName = idGeneratorDataSourceJndiName; }
  public ProcessApplicationManager getProcessApplicationManager() { return processApplicationManager; }
  public void setProcessApplicationManager(ProcessApplicationManager processApplicationManager) { this.processApplicationManager = processApplicationManager; }
  public CommandExecutor getCommandExecutorSchemaOperations() { return commandExecutorSchemaOperations; }
  public void setCommandExecutorSchemaOperations(CommandExecutor commandExecutorSchemaOperations) { this.commandExecutorSchemaOperations = commandExecutorSchemaOperations; }
  public CorrelationHandler getCorrelationHandler() { return correlationHandler; }
  public void setCorrelationHandler(CorrelationHandler correlationHandler) { this.correlationHandler = correlationHandler; }
  public ConditionHandler getConditionHandler() { return conditionHandler; }
  public void setConditionHandler(ConditionHandler conditionHandler) { this.conditionHandler = conditionHandler; }
  public ProcessEngineConfigurationImpl setHistoryEventHandler(HistoryEventHandler historyEventHandler) { this.historyEventHandler = historyEventHandler; return this; }
  public HistoryEventHandler getHistoryEventHandler() { return historyEventHandler; }
  public boolean isEnableDefaultDbHistoryEventHandler() { return enableDefaultDbHistoryEventHandler; }
  public void setEnableDefaultDbHistoryEventHandler(boolean enableDefaultDbHistoryEventHandler) { this.enableDefaultDbHistoryEventHandler = enableDefaultDbHistoryEventHandler; }
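  /*
   * Illustrative sketch of the prefix behavior documented above: with a prefix set, runtime
   * queries target prefixed tables, but automatic schema management would still create
   * unprefixed tables, so the schema is typically managed externally in this setup.
   *
   *   config.setDatabaseTablePrefix("PRE1.");
   *   config.setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_FALSE); // schema created outside the engine
   */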
  public List<HistoryEventHandler> getCustomHistoryEventHandlers() { return customHistoryEventHandlers; }
  public void setCustomHistoryEventHandlers(List<HistoryEventHandler> customHistoryEventHandlers) { this.customHistoryEventHandlers = customHistoryEventHandlers; }
  public IncidentHandler getIncidentHandler(String incidentType) { return incidentHandlers.get(incidentType); }
  public Map<String, IncidentHandler> getIncidentHandlers() { return incidentHandlers; }
  public void setIncidentHandlers(Map<String, IncidentHandler> incidentHandlers) { this.incidentHandlers = incidentHandlers; }
  public List<IncidentHandler> getCustomIncidentHandlers() { return customIncidentHandlers; }
  public void setCustomIncidentHandlers(List<IncidentHandler> customIncidentHandlers) { this.customIncidentHandlers = customIncidentHandlers; }
  public Map<String, BatchJobHandler<?>> getBatchHandlers() { return batchHandlers; }
  public void setBatchHandlers(Map<String, BatchJobHandler<?>> batchHandlers) { this.batchHandlers = batchHandlers; }
  public List<BatchJobHandler<?>> getCustomBatchJobHandlers() { return customBatchJobHandlers; }
  public void setCustomBatchJobHandlers(List<BatchJobHandler<?>> customBatchJobHandlers) { this.customBatchJobHandlers = customBatchJobHandlers; }
  public int getBatchJobsPerSeed() { return batchJobsPerSeed; }
  public void setBatchJobsPerSeed(int batchJobsPerSeed) { this.batchJobsPerSeed = batchJobsPerSeed; }
  public Map<String, Integer> getInvocationsPerBatchJobByBatchType() { return invocationsPerBatchJobByBatchType; }
  public ProcessEngineConfigurationImpl setInvocationsPerBatchJobByBatchType(Map<String, Integer> invocationsPerBatchJobByBatchType) { this.invocationsPerBatchJobByBatchType = invocationsPerBatchJobByBatchType; return this; }
  public int getInvocationsPerBatchJob() { return invocationsPerBatchJob; }
  public void setInvocationsPerBatchJob(int invocationsPerBatchJob) { this.invocationsPerBatchJob = invocationsPerBatchJob; }
  public int getBatchPollTime() { return batchPollTime; }
  public void setBatchPollTime(int batchPollTime) { this.batchPollTime = batchPollTime; }
  public long getBatchJobPriority() { return batchJobPriority; }
  public void setBatchJobPriority(long batchJobPriority) { this.batchJobPriority = batchJobPriority; }
  public SessionFactory getIdentityProviderSessionFactory() { return identityProviderSessionFactory; }
  public void setIdentityProviderSessionFactory(SessionFactory identityProviderSessionFactory) { this.identityProviderSessionFactory = identityProviderSessionFactory; }
  public SaltGenerator getSaltGenerator() { return saltGenerator; }
  public void setSaltGenerator(SaltGenerator saltGenerator) { this.saltGenerator = saltGenerator; }
  public void setPasswordEncryptor(PasswordEncryptor passwordEncryptor) { this.passwordEncryptor = passwordEncryptor; }
  public PasswordEncryptor getPasswordEncryptor() { return passwordEncryptor; }
  public List<PasswordEncryptor> getCustomPasswordChecker() { return customPasswordChecker; }
  public void setCustomPasswordChecker(List<PasswordEncryptor> customPasswordChecker) { this.customPasswordChecker = customPasswordChecker; }
  public PasswordManager getPasswordManager() { return passwordManager; }
  public void setPasswordManager(PasswordManager passwordManager) { this.passwordManager = passwordManager; }
  public Set<String> getRegisteredDeployments() { return registeredDeployments; }
  public void setRegisteredDeployments(Set<String> registeredDeployments) { this.registeredDeployments = registeredDeployments; }
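  /*
   * Illustrative sketch: invocationsPerBatchJobByBatchType above overrides the global
   * invocationsPerBatchJob for individual batch types. The type key "instance-migration" is
   * assumed to match the engine's migration batch type; treat it as a placeholder.
   *
   *   Map<String, Integer> perType = new HashMap<>();
   *   perType.put("instance-migration", 10); // placeholder batch-type key
   *   config.setInvocationsPerBatchJobByBatchType(perType);
   */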
  public ResourceAuthorizationProvider getResourceAuthorizationProvider() { return resourceAuthorizationProvider; }
  public void setResourceAuthorizationProvider(ResourceAuthorizationProvider resourceAuthorizationProvider) { this.resourceAuthorizationProvider = resourceAuthorizationProvider; }
  public PermissionProvider getPermissionProvider() { return permissionProvider; }
  public void setPermissionProvider(PermissionProvider permissionProvider) { this.permissionProvider = permissionProvider; }
  public List<ProcessEnginePlugin> getProcessEnginePlugins() { return processEnginePlugins; }
  public void setProcessEnginePlugins(List<ProcessEnginePlugin> processEnginePlugins) { this.processEnginePlugins = processEnginePlugins; }
  public ProcessEngineConfigurationImpl setHistoryEventProducer(HistoryEventProducer historyEventProducer) { this.historyEventProducer = historyEventProducer; return this; }
  public HistoryEventProducer getHistoryEventProducer() { return historyEventProducer; }
  public ProcessEngineConfigurationImpl setCmmnHistoryEventProducer(CmmnHistoryEventProducer cmmnHistoryEventProducer) { this.cmmnHistoryEventProducer = cmmnHistoryEventProducer; return this; }
  public CmmnHistoryEventProducer getCmmnHistoryEventProducer() { return cmmnHistoryEventProducer; }
  public ProcessEngineConfigurationImpl setDmnHistoryEventProducer(DmnHistoryEventProducer dmnHistoryEventProducer) { this.dmnHistoryEventProducer = dmnHistoryEventProducer; return this; }
  public DmnHistoryEventProducer getDmnHistoryEventProducer() { return dmnHistoryEventProducer; }
  public Map<String, Class<? extends FormFieldValidator>> getCustomFormFieldValidators() { return customFormFieldValidators; }
  public void setCustomFormFieldValidators(Map<String, Class<? extends FormFieldValidator>> customFormFieldValidators) { this.customFormFieldValidators = customFormFieldValidators; }
  public void setFormValidators(FormValidators formValidators) { this.formValidators = formValidators; }
  public FormValidators getFormValidators() { return formValidators; }
  public boolean isExecutionTreePrefetchEnabled() { return isExecutionTreePrefetchEnabled; }
  public void setExecutionTreePrefetchEnabled(boolean isExecutionTreePrefetchingEnabled) { this.isExecutionTreePrefetchEnabled = isExecutionTreePrefetchingEnabled; }
  public ProcessEngineImpl getProcessEngine() { return processEngine; }

  /**
   * If set to true, the process engine will save all script variables (created from JavaScript, Groovy ...)
   * as process variables.
   */
  public void setAutoStoreScriptVariables(boolean autoStoreScriptVariables) { this.autoStoreScriptVariables = autoStoreScriptVariables; }

  /**
   * @return true if the process engine should save all script variables (created from JavaScript, Groovy ...)
   * as process variables.
   */
  public boolean isAutoStoreScriptVariables() { return autoStoreScriptVariables; }

  /**
   * If set to true, the process engine will attempt to pre-compile script sources at runtime
   * to optimize script task execution performance.
   */
  public void setEnableScriptCompilation(boolean enableScriptCompilation) { this.enableScriptCompilation = enableScriptCompilation; }

  /**
   * @return true if compilation of script sources is enabled, false otherwise.
   */
  public boolean isEnableScriptCompilation() { return enableScriptCompilation; }
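  /*
   * Illustrative sketch combining the two script switches documented above: storing
   * script-created variables as process variables and pre-compiling script sources are
   * independent flags.
   *
   *   config.setAutoStoreScriptVariables(true); // script-created variables become process variables
   *   config.setEnableScriptCompilation(true);  // pre-compile script sources where supported
   */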
  public boolean isEnableGracefulDegradationOnContextSwitchFailure() { return enableGracefulDegradationOnContextSwitchFailure; }

  /**
   * <p>If set to true, the process engine will tolerate certain exceptions that may result
   * from the fact that it cannot switch to the context of a process application that has made
   * a deployment.</p>
   *
   * <p>Affects the following scenarios:</p>
   * <ul>
   * <li><b>Determining job priorities</b>: uses a default priority in case an expression fails to evaluate</li>
   * </ul>
   */
  public void setEnableGracefulDegradationOnContextSwitchFailure(boolean enableGracefulDegradationOnContextSwitchFailure) { this.enableGracefulDegradationOnContextSwitchFailure = enableGracefulDegradationOnContextSwitchFailure; }

  /**
   * @return true if the process engine acquires an exclusive lock when creating a deployment.
   */
  public boolean isDeploymentLockUsed() { return isDeploymentLockUsed; }

  /**
   * If set to true, the process engine will acquire an exclusive lock when creating a deployment.
   * This ensures that {@link DeploymentBuilder#enableDuplicateFiltering()} works correctly in a clustered environment.
   */
  public void setDeploymentLockUsed(boolean isDeploymentLockUsed) { this.isDeploymentLockUsed = isDeploymentLockUsed; }

  /**
   * @return true if deployment processing must be synchronized
   */
  public boolean isDeploymentSynchronized() { return isDeploymentSynchronized; }

  /**
   * Sets if deployment processing must be synchronized.
   *
   * @param deploymentSynchronized {@code true} when deployment must be synchronized,
   *                               {@code false} when several deployments may be processed in parallel
   */
  public void setDeploymentSynchronized(boolean deploymentSynchronized) { isDeploymentSynchronized = deploymentSynchronized; }

  public boolean isCmmnEnabled() { return cmmnEnabled; }
  public void setCmmnEnabled(boolean cmmnEnabled) { this.cmmnEnabled = cmmnEnabled; }
  public boolean isDmnEnabled() { return dmnEnabled; }
  public void setDmnEnabled(boolean dmnEnabled) { this.dmnEnabled = dmnEnabled; }
  public ScriptFactory getScriptFactory() { return scriptFactory; }
  public ScriptingEnvironment getScriptingEnvironment() { return scriptingEnvironment; }
  public void setScriptFactory(ScriptFactory scriptFactory) { this.scriptFactory = scriptFactory; }
  public void setScriptingEnvironment(ScriptingEnvironment scriptingEnvironment) { this.scriptingEnvironment = scriptingEnvironment; }
  public List<ScriptEnvResolver> getEnvScriptResolvers() { return scriptEnvResolvers; }
  public void setEnvScriptResolvers(List<ScriptEnvResolver> scriptEnvResolvers) { this.scriptEnvResolvers = scriptEnvResolvers; }
  public ProcessEngineConfiguration setArtifactFactory(ArtifactFactory artifactFactory) { this.artifactFactory = artifactFactory; return this; }
  public ArtifactFactory getArtifactFactory() { return artifactFactory; }
  public String getDefaultSerializationFormat() { return defaultSerializationFormat; }
  public ProcessEngineConfigurationImpl setDefaultSerializationFormat(String defaultSerializationFormat) { this.defaultSerializationFormat = defaultSerializationFormat; return this; }
  public boolean isJavaSerializationFormatEnabled() { return javaSerializationFormatEnabled; }
  public void setJavaSerializationFormatEnabled(boolean javaSerializationFormatEnabled) { this.javaSerializationFormatEnabled = javaSerializationFormatEnabled; }
  public ProcessEngineConfigurationImpl setDefaultCharsetName(String defaultCharsetName) { this.defaultCharsetName = defaultCharsetName; return this; }
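  /*
   * Illustrative sketch: the exclusive deployment lock documented above is what makes
   * duplicate filtering reliable across cluster nodes; both switches are shown explicitly
   * here only for emphasis.
   *
   *   config.setDeploymentLockUsed(true);
   *   config.setDeploymentSynchronized(true);
   */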
  public ProcessEngineConfigurationImpl setDefaultCharset(Charset defaultCharset) { this.defaultCharset = defaultCharset; return this; }
  public Charset getDefaultCharset() { return defaultCharset; }
  public boolean isDbEntityCacheReuseEnabled() { return isDbEntityCacheReuseEnabled; }
  public ProcessEngineConfigurationImpl setDbEntityCacheReuseEnabled(boolean isDbEntityCacheReuseEnabled) { this.isDbEntityCacheReuseEnabled = isDbEntityCacheReuseEnabled; return this; }
  public DbEntityCacheKeyMapping getDbEntityCacheKeyMapping() { return dbEntityCacheKeyMapping; }
  public ProcessEngineConfigurationImpl setDbEntityCacheKeyMapping(DbEntityCacheKeyMapping dbEntityCacheKeyMapping) { this.dbEntityCacheKeyMapping = dbEntityCacheKeyMapping; return this; }
  public ProcessEngineConfigurationImpl setCustomHistoryLevels(List<HistoryLevel> customHistoryLevels) { this.customHistoryLevels = customHistoryLevels; return this; }
  public List<HistoryLevel> getHistoryLevels() { return historyLevels; }
  public List<HistoryLevel> getCustomHistoryLevels() { return customHistoryLevels; }
  public boolean isInvokeCustomVariableListeners() { return isInvokeCustomVariableListeners; }
  public ProcessEngineConfigurationImpl setInvokeCustomVariableListeners(boolean isInvokeCustomVariableListeners) { this.isInvokeCustomVariableListeners = isInvokeCustomVariableListeners; return this; }

  public void close() {
    if (forceCloseMybatisConnectionPool && dataSource instanceof PooledDataSource) {
      // ACT-233: the Ibatis connection pool is not properly closed if this is not called!
      ((PooledDataSource) dataSource).forceCloseAll();
    }
  }

  public MetricsRegistry getMetricsRegistry() { return metricsRegistry; }
  public ProcessEngineConfigurationImpl setMetricsRegistry(MetricsRegistry metricsRegistry) { this.metricsRegistry = metricsRegistry; return this; }
  public ProcessEngineConfigurationImpl setMetricsEnabled(boolean isMetricsEnabled) { this.isMetricsEnabled = isMetricsEnabled; return this; }
  public boolean isMetricsEnabled() { return isMetricsEnabled; }
  public DbMetricsReporter getDbMetricsReporter() { return dbMetricsReporter; }
  public ProcessEngineConfigurationImpl setDbMetricsReporter(DbMetricsReporter dbMetricsReporter) { this.dbMetricsReporter = dbMetricsReporter; return this; }
  public boolean isDbMetricsReporterActivate() { return isDbMetricsReporterActivate; }
  public ProcessEngineConfigurationImpl setDbMetricsReporterActivate(boolean isDbMetricsReporterEnabled) { this.isDbMetricsReporterActivate = isDbMetricsReporterEnabled; return this; }

  /**
   * @deprecated use {@link #getHostnameProvider()} instead.
   */
  @Deprecated
  public MetricsReporterIdProvider getMetricsReporterIdProvider() { return metricsReporterIdProvider; }

  /**
   * @deprecated use {@link #setHostnameProvider(HostnameProvider)} instead.
   */
  @Deprecated
  public ProcessEngineConfigurationImpl setMetricsReporterIdProvider(MetricsReporterIdProvider metricsReporterIdProvider) { this.metricsReporterIdProvider = metricsReporterIdProvider; return this; }

  public String getHostname() { return hostname; }
  public ProcessEngineConfigurationImpl setHostname(String hostname) { this.hostname = hostname; return this; }
  public HostnameProvider getHostnameProvider() { return hostnameProvider; }
  public ProcessEngineConfigurationImpl setHostnameProvider(HostnameProvider hostnameProvider) { this.hostnameProvider = hostnameProvider; return this; }
  public boolean isEnableScriptEngineCaching() { return enableScriptEngineCaching; }
  public ProcessEngineConfigurationImpl setEnableScriptEngineCaching(boolean enableScriptEngineCaching) { this.enableScriptEngineCaching = enableScriptEngineCaching; return this; }
  public boolean isEnableFetchScriptEngineFromProcessApplication() { return enableFetchScriptEngineFromProcessApplication; }
  public ProcessEngineConfigurationImpl setEnableFetchScriptEngineFromProcessApplication(boolean enable) { this.enableFetchScriptEngineFromProcessApplication = enable; return this; }
  public boolean isEnableExpressionsInAdhocQueries() { return enableExpressionsInAdhocQueries; }
  public void setEnableExpressionsInAdhocQueries(boolean enableExpressionsInAdhocQueries) { this.enableExpressionsInAdhocQueries = enableExpressionsInAdhocQueries; }
  public boolean isEnableExpressionsInStoredQueries() { return enableExpressionsInStoredQueries; }
  public void setEnableExpressionsInStoredQueries(boolean enableExpressionsInStoredQueries) { this.enableExpressionsInStoredQueries = enableExpressionsInStoredQueries; }
  public boolean isEnableXxeProcessing() { return enableXxeProcessing; }
  public void setEnableXxeProcessing(boolean enableXxeProcessing) { this.enableXxeProcessing = enableXxeProcessing; }
  public ProcessEngineConfigurationImpl setBpmnStacktraceVerbose(boolean isBpmnStacktraceVerbose) { this.isBpmnStacktraceVerbose = isBpmnStacktraceVerbose; return this; }
  public boolean isBpmnStacktraceVerbose() { return this.isBpmnStacktraceVerbose; }
  public boolean isForceCloseMybatisConnectionPool() { return forceCloseMybatisConnectionPool; }
  public ProcessEngineConfigurationImpl setForceCloseMybatisConnectionPool(boolean forceCloseMybatisConnectionPool) { this.forceCloseMybatisConnectionPool = forceCloseMybatisConnectionPool; return this; }
  public boolean isRestrictUserOperationLogToAuthenticatedUsers() { return restrictUserOperationLogToAuthenticatedUsers; }
  public ProcessEngineConfigurationImpl setRestrictUserOperationLogToAuthenticatedUsers(boolean restrictUserOperationLogToAuthenticatedUsers) { this.restrictUserOperationLogToAuthenticatedUsers = restrictUserOperationLogToAuthenticatedUsers; return this; }
  public ProcessEngineConfigurationImpl setTenantIdProvider(TenantIdProvider tenantIdProvider) { this.tenantIdProvider = tenantIdProvider; return this; }
  public TenantIdProvider getTenantIdProvider() { return this.tenantIdProvider; }
  public void setMigrationActivityMatcher(MigrationActivityMatcher migrationActivityMatcher) { this.migrationActivityMatcher = migrationActivityMatcher; }
  public MigrationActivityMatcher getMigrationActivityMatcher() { return migrationActivityMatcher; }
  public void setCustomPreMigrationActivityValidators(List<MigrationActivityValidator> customPreMigrationActivityValidators) { this.customPreMigrationActivityValidators = customPreMigrationActivityValidators; }
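  /*
   * Illustrative sketch: initHostName() derives the engine node's name via the configured
   * HostnameProvider, and metrics reports are attributed to that name. A fixed name can be
   * pinned per node instead; "node-1" is a placeholder.
   *
   *   config.setHostname("node-1"); // placeholder node name used when reporting metrics
   */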
  public List<MigrationActivityValidator> getCustomPreMigrationActivityValidators() { return customPreMigrationActivityValidators; }
  public void setCustomPostMigrationActivityValidators(List<MigrationActivityValidator> customPostMigrationActivityValidators) { this.customPostMigrationActivityValidators = customPostMigrationActivityValidators; }
  public List<MigrationActivityValidator> getCustomPostMigrationActivityValidators() { return customPostMigrationActivityValidators; }

  public List<MigrationActivityValidator> getDefaultMigrationActivityValidators() {
    List<MigrationActivityValidator> migrationActivityValidators = new ArrayList<>();
    migrationActivityValidators.add(SupportedActivityValidator.INSTANCE);
    migrationActivityValidators.add(SupportedPassiveEventTriggerActivityValidator.INSTANCE);
    migrationActivityValidators.add(NoCompensationHandlerActivityValidator.INSTANCE);
    return migrationActivityValidators;
  }

  public void setMigrationInstructionGenerator(MigrationInstructionGenerator migrationInstructionGenerator) { this.migrationInstructionGenerator = migrationInstructionGenerator; }
  public MigrationInstructionGenerator getMigrationInstructionGenerator() { return migrationInstructionGenerator; }
  public void setMigrationInstructionValidators(List<MigrationInstructionValidator> migrationInstructionValidators) { this.migrationInstructionValidators = migrationInstructionValidators; }
  public List<MigrationInstructionValidator> getMigrationInstructionValidators() { return migrationInstructionValidators; }
  public void setCustomPostMigrationInstructionValidators(List<MigrationInstructionValidator> customPostMigrationInstructionValidators) { this.customPostMigrationInstructionValidators = customPostMigrationInstructionValidators; }
  public List<MigrationInstructionValidator> getCustomPostMigrationInstructionValidators() { return customPostMigrationInstructionValidators; }
  public void setCustomPreMigrationInstructionValidators(List<MigrationInstructionValidator> customPreMigrationInstructionValidators) { this.customPreMigrationInstructionValidators = customPreMigrationInstructionValidators; }
  public List<MigrationInstructionValidator> getCustomPreMigrationInstructionValidators() { return customPreMigrationInstructionValidators; }

  public List<MigrationInstructionValidator> getDefaultMigrationInstructionValidators() {
    List<MigrationInstructionValidator> migrationInstructionValidators = new ArrayList<>();
    migrationInstructionValidators.add(new SameBehaviorInstructionValidator());
    migrationInstructionValidators.add(new SameEventTypeValidator());
    migrationInstructionValidators.add(new OnlyOnceMappedActivityInstructionValidator());
    migrationInstructionValidators.add(new CannotAddMultiInstanceBodyValidator());
    migrationInstructionValidators.add(new CannotAddMultiInstanceInnerActivityValidator());
    migrationInstructionValidators.add(new CannotRemoveMultiInstanceInnerActivityValidator());
    migrationInstructionValidators.add(new GatewayMappingValidator());
    migrationInstructionValidators.add(new SameEventScopeInstructionValidator());
    migrationInstructionValidators.add(new UpdateEventTriggersValidator());
    migrationInstructionValidators.add(new AdditionalFlowScopeInstructionValidator());
    migrationInstructionValidators.add(new ConditionalEventUpdateEventTriggerValidator());
    return migrationInstructionValidators;
  }

  public void setMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators) { this.migratingActivityInstanceValidators = migratingActivityInstanceValidators; }
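  /*
   * Illustrative sketch: getDefaultMigrationInstructionValidators() above assembles the
   * built-in rules; project-specific rules can be appended via the "custom post" list.
   * "NamingConventionValidator" is a hypothetical MigrationInstructionValidator.
   *
   *   config.setCustomPostMigrationInstructionValidators(
   *       Collections.<MigrationInstructionValidator>singletonList(new NamingConventionValidator()));
   */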
  public List<MigratingActivityInstanceValidator> getMigratingActivityInstanceValidators() { return migratingActivityInstanceValidators; }
  public void setCustomPostMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> customPostMigratingActivityInstanceValidators) { this.customPostMigratingActivityInstanceValidators = customPostMigratingActivityInstanceValidators; }
  public List<MigratingActivityInstanceValidator> getCustomPostMigratingActivityInstanceValidators() { return customPostMigratingActivityInstanceValidators; }
  public void setCustomPreMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> customPreMigratingActivityInstanceValidators) { this.customPreMigratingActivityInstanceValidators = customPreMigratingActivityInstanceValidators; }
  public List<MigratingActivityInstanceValidator> getCustomPreMigratingActivityInstanceValidators() { return customPreMigratingActivityInstanceValidators; }
  public List<MigratingTransitionInstanceValidator> getMigratingTransitionInstanceValidators() { return migratingTransitionInstanceValidators; }
  public List<MigratingCompensationInstanceValidator> getMigratingCompensationInstanceValidators() { return migratingCompensationInstanceValidators; }

  public List<MigratingActivityInstanceValidator> getDefaultMigratingActivityInstanceValidators() {
    List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators = new ArrayList<>();
    migratingActivityInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
    migratingActivityInstanceValidators.add(new VariableConflictActivityInstanceValidator());
    migratingActivityInstanceValidators.add(new SupportedActivityInstanceValidator());
    return migratingActivityInstanceValidators;
  }

  public List<MigratingTransitionInstanceValidator> getDefaultMigratingTransitionInstanceValidators() {
    List<MigratingTransitionInstanceValidator> migratingTransitionInstanceValidators = new ArrayList<>();
    migratingTransitionInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
    migratingTransitionInstanceValidators.add(new AsyncAfterMigrationValidator());
    migratingTransitionInstanceValidators.add(new AsyncProcessStartMigrationValidator());
    migratingTransitionInstanceValidators.add(new AsyncMigrationValidator());
    return migratingTransitionInstanceValidators;
  }

  public List<CommandChecker> getCommandCheckers() { return commandCheckers; }
  public void setCommandCheckers(List<CommandChecker> commandCheckers) { this.commandCheckers = commandCheckers; }
  public ProcessEngineConfigurationImpl setUseSharedSqlSessionFactory(boolean isUseSharedSqlSessionFactory) { this.isUseSharedSqlSessionFactory = isUseSharedSqlSessionFactory; return this; }
  public boolean isUseSharedSqlSessionFactory() { return isUseSharedSqlSessionFactory; }
  public boolean getDisableStrictCallActivityValidation() { return disableStrictCallActivityValidation; }
  public void setDisableStrictCallActivityValidation(boolean disableStrictCallActivityValidation) { this.disableStrictCallActivityValidation = disableStrictCallActivityValidation; }
  public String getHistoryCleanupBatchWindowStartTime() { return historyCleanupBatchWindowStartTime; }
  public void setHistoryCleanupBatchWindowStartTime(String historyCleanupBatchWindowStartTime) { this.historyCleanupBatchWindowStartTime = historyCleanupBatchWindowStartTime; }
  public String getHistoryCleanupBatchWindowEndTime() { return historyCleanupBatchWindowEndTime; }
  public void setHistoryCleanupBatchWindowEndTime(String historyCleanupBatchWindowEndTime) { this.historyCleanupBatchWindowEndTime = historyCleanupBatchWindowEndTime; }
this.historyCleanupBatchWindowEndTime = historyCleanupBatchWindowEndTime; } public String getMondayHistoryCleanupBatchWindowStartTime() { return mondayHistoryCleanupBatchWindowStartTime; } public void setMondayHistoryCleanupBatchWindowStartTime(String mondayHistoryCleanupBatchWindowStartTime) { this.mondayHistoryCleanupBatchWindowStartTime = mondayHistoryCleanupBatchWindowStartTime; } public String getMondayHistoryCleanupBatchWindowEndTime() { return mondayHistoryCleanupBatchWindowEndTime; } public void setMondayHistoryCleanupBatchWindowEndTime(String mondayHistoryCleanupBatchWindowEndTime) { this.mondayHistoryCleanupBatchWindowEndTime = mondayHistoryCleanupBatchWindowEndTime; } public String getTuesdayHistoryCleanupBatchWindowStartTime() { return tuesdayHistoryCleanupBatchWindowStartTime; } public void setTuesdayHistoryCleanupBatchWindowStartTime(String tuesdayHistoryCleanupBatchWindowStartTime) { this.tuesdayHistoryCleanupBatchWindowStartTime = tuesdayHistoryCleanupBatchWindowStartTime; } public String getTuesdayHistoryCleanupBatchWindowEndTime() { return tuesdayHistoryCleanupBatchWindowEndTime; } public void setTuesdayHistoryCleanupBatchWindowEndTime(String tuesdayHistoryCleanupBatchWindowEndTime) { this.tuesdayHistoryCleanupBatchWindowEndTime = tuesdayHistoryCleanupBatchWindowEndTime; } public String getWednesdayHistoryCleanupBatchWindowStartTime() { return wednesdayHistoryCleanupBatchWindowStartTime; } public void setWednesdayHistoryCleanupBatchWindowStartTime(String wednesdayHistoryCleanupBatchWindowStartTime) { this.wednesdayHistoryCleanupBatchWindowStartTime = wednesdayHistoryCleanupBatchWindowStartTime; } public String getWednesdayHistoryCleanupBatchWindowEndTime() { return wednesdayHistoryCleanupBatchWindowEndTime; } public void setWednesdayHistoryCleanupBatchWindowEndTime(String wednesdayHistoryCleanupBatchWindowEndTime) { this.wednesdayHistoryCleanupBatchWindowEndTime = wednesdayHistoryCleanupBatchWindowEndTime; } public String getThursdayHistoryCleanupBatchWindowStartTime() { return thursdayHistoryCleanupBatchWindowStartTime; } public void setThursdayHistoryCleanupBatchWindowStartTime(String thursdayHistoryCleanupBatchWindowStartTime) { this.thursdayHistoryCleanupBatchWindowStartTime = thursdayHistoryCleanupBatchWindowStartTime; } public String getThursdayHistoryCleanupBatchWindowEndTime() { return thursdayHistoryCleanupBatchWindowEndTime; } public void setThursdayHistoryCleanupBatchWindowEndTime(String thursdayHistoryCleanupBatchWindowEndTime) { this.thursdayHistoryCleanupBatchWindowEndTime = thursdayHistoryCleanupBatchWindowEndTime; } public String getFridayHistoryCleanupBatchWindowStartTime() { return fridayHistoryCleanupBatchWindowStartTime; } public void setFridayHistoryCleanupBatchWindowStartTime(String fridayHistoryCleanupBatchWindowStartTime) { this.fridayHistoryCleanupBatchWindowStartTime = fridayHistoryCleanupBatchWindowStartTime; } public String getFridayHistoryCleanupBatchWindowEndTime() { return fridayHistoryCleanupBatchWindowEndTime; } public void setFridayHistoryCleanupBatchWindowEndTime(String fridayHistoryCleanupBatchWindowEndTime) { this.fridayHistoryCleanupBatchWindowEndTime = fridayHistoryCleanupBatchWindowEndTime; } public String getSaturdayHistoryCleanupBatchWindowStartTime() { return saturdayHistoryCleanupBatchWindowStartTime; } public void setSaturdayHistoryCleanupBatchWindowStartTime(String saturdayHistoryCleanupBatchWindowStartTime) { this.saturdayHistoryCleanupBatchWindowStartTime = saturdayHistoryCleanupBatchWindowStartTime; } public String 
getSaturdayHistoryCleanupBatchWindowEndTime() { return saturdayHistoryCleanupBatchWindowEndTime; } public void setSaturdayHistoryCleanupBatchWindowEndTime(String saturdayHistoryCleanupBatchWindowEndTime) { this.saturdayHistoryCleanupBatchWindowEndTime = saturdayHistoryCleanupBatchWindowEndTime; } public String getSundayHistoryCleanupBatchWindowStartTime() { return sundayHistoryCleanupBatchWindowStartTime; } public void setSundayHistoryCleanupBatchWindowStartTime(String sundayHistoryCleanupBatchWindowStartTime) { this.sundayHistoryCleanupBatchWindowStartTime = sundayHistoryCleanupBatchWindowStartTime; } public String getSundayHistoryCleanupBatchWindowEndTime() { return sundayHistoryCleanupBatchWindowEndTime; } public void setSundayHistoryCleanupBatchWindowEndTime(String sundayHistoryCleanupBatchWindowEndTime) { this.sundayHistoryCleanupBatchWindowEndTime = sundayHistoryCleanupBatchWindowEndTime; } public Date getHistoryCleanupBatchWindowStartTimeAsDate() { return historyCleanupBatchWindowStartTimeAsDate; } public void setHistoryCleanupBatchWindowStartTimeAsDate(Date historyCleanupBatchWindowStartTimeAsDate) { this.historyCleanupBatchWindowStartTimeAsDate = historyCleanupBatchWindowStartTimeAsDate; } public void setHistoryCleanupBatchWindowEndTimeAsDate(Date historyCleanupBatchWindowEndTimeAsDate) { this.historyCleanupBatchWindowEndTimeAsDate = historyCleanupBatchWindowEndTimeAsDate; } public Date getHistoryCleanupBatchWindowEndTimeAsDate() { return historyCleanupBatchWindowEndTimeAsDate; } public Map<Integer, BatchWindowConfiguration> getHistoryCleanupBatchWindows() { return historyCleanupBatchWindows; } public void setHistoryCleanupBatchWindows(Map<Integer, BatchWindowConfiguration> historyCleanupBatchWindows) { this.historyCleanupBatchWindows = historyCleanupBatchWindows; } public int getHistoryCleanupBatchSize() { return historyCleanupBatchSize; } public void setHistoryCleanupBatchSize(int historyCleanupBatchSize) { this.historyCleanupBatchSize = historyCleanupBatchSize; } public int getHistoryCleanupBatchThreshold() { return historyCleanupBatchThreshold; } public void setHistoryCleanupBatchThreshold(int historyCleanupBatchThreshold) { this.historyCleanupBatchThreshold = historyCleanupBatchThreshold; } public boolean isHistoryCleanupMetricsEnabled() { return historyCleanupMetricsEnabled; } public void setHistoryCleanupMetricsEnabled(boolean historyCleanupMetricsEnabled) { this.historyCleanupMetricsEnabled = historyCleanupMetricsEnabled; } public boolean isHistoryCleanupEnabled() { return historyCleanupEnabled; } public ProcessEngineConfigurationImpl setHistoryCleanupEnabled(boolean historyCleanupEnabled) { this.historyCleanupEnabled = historyCleanupEnabled; return this; } public String getHistoryTimeToLive() { return historyTimeToLive; } public void setHistoryTimeToLive(String historyTimeToLive) { this.historyTimeToLive = historyTimeToLive; } public String getBatchOperationHistoryTimeToLive() { return batchOperationHistoryTimeToLive; } public int getHistoryCleanupDegreeOfParallelism() { return historyCleanupDegreeOfParallelism; } public void setHistoryCleanupDegreeOfParallelism(int historyCleanupDegreeOfParallelism) { this.historyCleanupDegreeOfParallelism = historyCleanupDegreeOfParallelism; } public void setBatchOperationHistoryTimeToLive(String batchOperationHistoryTimeToLive) { this.batchOperationHistoryTimeToLive = batchOperationHistoryTimeToLive; } public Map<String, String> getBatchOperationsForHistoryCleanup() { return batchOperationsForHistoryCleanup; } public void 
setBatchOperationsForHistoryCleanup(Map<String, String> batchOperationsForHistoryCleanup) { this.batchOperationsForHistoryCleanup = batchOperationsForHistoryCleanup; } public Map<String, Integer> getParsedBatchOperationsForHistoryCleanup() { return parsedBatchOperationsForHistoryCleanup; } public void setParsedBatchOperationsForHistoryCleanup(Map<String, Integer> parsedBatchOperationsForHistoryCleanup) { this.parsedBatchOperationsForHistoryCleanup = parsedBatchOperationsForHistoryCleanup; } public BatchWindowManager getBatchWindowManager() { return batchWindowManager; } public void setBatchWindowManager(BatchWindowManager batchWindowManager) { this.batchWindowManager = batchWindowManager; } public HistoryRemovalTimeProvider getHistoryRemovalTimeProvider() { return historyRemovalTimeProvider; } public ProcessEngineConfigurationImpl setHistoryRemovalTimeProvider(HistoryRemovalTimeProvider removalTimeProvider) { historyRemovalTimeProvider = removalTimeProvider; return this; } public String getHistoryRemovalTimeStrategy() { return historyRemovalTimeStrategy; } public ProcessEngineConfigurationImpl setHistoryRemovalTimeStrategy(String removalTimeStrategy) { historyRemovalTimeStrategy = removalTimeStrategy; return this; } public String getHistoryCleanupStrategy() { return historyCleanupStrategy; } public ProcessEngineConfigurationImpl setHistoryCleanupStrategy(String historyCleanupStrategy) { this.historyCleanupStrategy = historyCleanupStrategy; return this; } public int getFailedJobListenerMaxRetries() { return failedJobListenerMaxRetries; } public void setFailedJobListenerMaxRetries(int failedJobListenerMaxRetries) { this.failedJobListenerMaxRetries = failedJobListenerMaxRetries; } public String getFailedJobRetryTimeCycle() { return failedJobRetryTimeCycle; } public void setFailedJobRetryTimeCycle(String failedJobRetryTimeCycle) { this.failedJobRetryTimeCycle = failedJobRetryTimeCycle; } public int getLoginMaxAttempts() { return loginMaxAttempts; } public void setLoginMaxAttempts(int loginMaxAttempts) { this.loginMaxAttempts = loginMaxAttempts; } public int getLoginDelayFactor() { return loginDelayFactor; } public void setLoginDelayFactor(int loginDelayFactor) { this.loginDelayFactor = loginDelayFactor; } public int getLoginDelayMaxTime() { return loginDelayMaxTime; } public void setLoginDelayMaxTime(int loginDelayMaxTime) { this.loginDelayMaxTime = loginDelayMaxTime; } public int getLoginDelayBase() { return loginDelayBase; } public void setLoginDelayBase(int loginInitialDelay) { this.loginDelayBase = loginInitialDelay; } public List<String> getAdminGroups() { return adminGroups; } public void setAdminGroups(List<String> adminGroups) { this.adminGroups = adminGroups; } public List<String> getAdminUsers() { return adminUsers; } public void setAdminUsers(List<String> adminUsers) { this.adminUsers = adminUsers; } public int getQueryMaxResultsLimit() { return queryMaxResultsLimit; } public ProcessEngineConfigurationImpl setQueryMaxResultsLimit(int queryMaxResultsLimit) { this.queryMaxResultsLimit = queryMaxResultsLimit; return this; } public String getLoggingContextActivityId() { return loggingContextActivityId; } public ProcessEngineConfigurationImpl setLoggingContextActivityId(String loggingContextActivityId) { this.loggingContextActivityId = loggingContextActivityId; return this; } public String getLoggingContextApplicationName() { return loggingContextApplicationName; } public ProcessEngineConfigurationImpl setLoggingContextApplicationName(String loggingContextApplicationName) { 
this.loggingContextApplicationName = loggingContextApplicationName; return this; } public String getLoggingContextBusinessKey() { return loggingContextBusinessKey; } public ProcessEngineConfigurationImpl setLoggingContextBusinessKey(String loggingContextBusinessKey) { this.loggingContextBusinessKey = loggingContextBusinessKey; return this; } public String getLoggingContextProcessDefinitionId() { return loggingContextProcessDefinitionId; } public ProcessEngineConfigurationImpl setLoggingContextProcessDefinitionId(String loggingContextProcessDefinitionId) { this.loggingContextProcessDefinitionId = loggingContextProcessDefinitionId; return this; } public String getLoggingContextProcessInstanceId() { return loggingContextProcessInstanceId; } public ProcessEngineConfigurationImpl setLoggingContextProcessInstanceId(String loggingContextProcessInstanceId) { this.loggingContextProcessInstanceId = loggingContextProcessInstanceId; return this; } public String getLoggingContextTenantId() { return loggingContextTenantId; } public ProcessEngineConfigurationImpl setLoggingContextTenantId(String loggingContextTenantId) { this.loggingContextTenantId = loggingContextTenantId; return this; } public List<FeelCustomFunctionProvider> getDmnFeelCustomFunctionProviders() { return dmnFeelCustomFunctionProviders; } public ProcessEngineConfigurationImpl setDmnFeelCustomFunctionProviders(List<FeelCustomFunctionProvider> dmnFeelCustomFunctionProviders) { this.dmnFeelCustomFunctionProviders = dmnFeelCustomFunctionProviders; return this; } public boolean isDmnFeelEnableLegacyBehavior() { return dmnFeelEnableLegacyBehavior; } public ProcessEngineConfigurationImpl setDmnFeelEnableLegacyBehavior(boolean dmnFeelEnableLegacyBehavior) { this.dmnFeelEnableLegacyBehavior = dmnFeelEnableLegacyBehavior; return this; } public boolean isTelemetryEnabled() { return telemetryEnabled; } public ProcessEngineConfigurationImpl setTelemetryEnabled(boolean telemetryEnabled) { this.telemetryEnabled = telemetryEnabled; return this; } public String getTelemetryEndpoint() { return telemetryEndpoint; } public ProcessEngineConfigurationImpl setTelemetryEndpoint(String telemetryEndpoint) { this.telemetryEndpoint = telemetryEndpoint; return this; } }
1
10534
I would prefer an active verb for this property, e.g. `initializeTelemetry`. The reason is that this property refers to something the engine does once on startup. Other properties that use passive voice (e.g. `authorizationEnabled`) refer to a state of the engine during its lifetime.
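A minimal sketch of the suggested rename, assuming the fluent-setter convention used throughout this configuration class; `TelemetryConfigSketch` is a hypothetical stand-in, not the actual Camunda class:

```java
// Hypothetical stand-in for ProcessEngineConfigurationImpl, shown only to
// illustrate the active-verb property name suggested in the review.
public class TelemetryConfigSketch {

  protected boolean initializeTelemetry = false;

  public boolean isInitializeTelemetry() {
    return initializeTelemetry;
  }

  // Fluent setter, mirroring the set...(...) { ...; return this; } style
  // used by the other properties in this class.
  public TelemetryConfigSketch setInitializeTelemetry(boolean initializeTelemetry) {
    this.initializeTelemetry = initializeTelemetry;
    return this;
  }
}
```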
camunda-camunda-bpm-platform
java
@@ -105,5 +105,10 @@ namespace Microsoft.Rest.Generator.NodeJS return requiredParams.ToString(); } } + + public bool ContainsTimeSpan + { + get { return this.Methods.FirstOrDefault(m => m.Parameters.FirstOrDefault(p => p.Type == PrimaryType.TimeSpan) != null) != null; } + } } }
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. using System.Collections.Generic; using System.Globalization; using System.Linq; using Microsoft.Rest.Generator.ClientModel; using Microsoft.Rest.Generator.Utilities; using System.Text; using Microsoft.Rest.Generator.NodeJS.TemplateModels; namespace Microsoft.Rest.Generator.NodeJS { public class ServiceClientTemplateModel : ServiceClient { public ServiceClientTemplateModel(ServiceClient serviceClient) { this.LoadFrom(serviceClient); MethodTemplateModels = new List<MethodTemplateModel>(); ModelTemplateModels = new List<ModelTemplateModel>(); Methods.Where(m => m.Group == null) .ForEach(m => MethodTemplateModels.Add(new MethodTemplateModel(m, serviceClient))); ModelTypes.ForEach(m => ModelTemplateModels.Add(new ModelTemplateModel(m, serviceClient))); } public List<MethodTemplateModel> MethodTemplateModels { get; private set; } public List<ModelTemplateModel> ModelTemplateModels { get; private set; } public virtual IEnumerable<MethodGroupTemplateModel> MethodGroupModels { get { return MethodGroups.Select(mg => new MethodGroupTemplateModel(this, mg)); } } public string PolymorphicDictionary { get { IndentedStringBuilder builder = new IndentedStringBuilder(IndentedStringBuilder.TwoSpaces); var polymorphicTypes = ModelTemplateModels.Where(m => m.IsPolymorphic); for (int i = 0; i < polymorphicTypes.Count(); i++ ) { builder.Append(string.Format(CultureInfo.InvariantCulture, "'{0}' : exports.{1}", polymorphicTypes.ElementAt(i).SerializedName, polymorphicTypes.ElementAt(i).Name)); if(i == polymorphicTypes.Count() -1) { builder.AppendLine(); } else { builder.AppendLine(","); } } return builder.ToString(); } } public string RequiredConstructorParameters { get { var requireParams = new List<string>(); this.Properties.Where(p => p.IsRequired) .ForEach(p => requireParams.Add(p.Name.ToCamelCase())); requireParams.Add("baseUri"); return string.Join(", ", requireParams); } } /// <summary> /// Return the service client constructor required parameters, in TypeScript syntax. /// </summary> public string RequiredConstructorParametersTS { get { StringBuilder requiredParams = new StringBuilder(); bool first = true; foreach (var p in this.Properties) { if (! p.IsRequired) continue; if (!first) requiredParams.Append(", "); requiredParams.Append(p.Name); requiredParams.Append(": "); requiredParams.Append(p.Type.TSType(false)); first = false; } if (!first) requiredParams.Append(", "); requiredParams.Append("baseUri: string"); return requiredParams.ToString(); } } } }
1
21151
Please use a new line to maintain a reasonable line width.
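A sketch of the reformatting the comment asks for, with the same LINQ logic split across lines; the `PrimaryType`/`Parameter`/`Method` stubs below are hypothetical stand-ins for the generator's model types, added only so the snippet compiles on its own:

```csharp
using System.Collections.Generic;
using System.Linq;

// Hypothetical stand-ins for the AutoRest model types.
public enum PrimaryType { TimeSpan, String }
public class Parameter { public PrimaryType Type; }
public class Method { public List<Parameter> Parameters = new List<Parameter>(); }

public class ServiceClientSketch
{
    public List<Method> Methods = new List<Method>();

    public bool ContainsTimeSpan
    {
        get
        {
            // Same expression as the patch, wrapped to keep a reasonable line width.
            return this.Methods.FirstOrDefault(
                m => m.Parameters.FirstOrDefault(
                    p => p.Type == PrimaryType.TimeSpan) != null) != null;
        }
    }
}
```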
Azure-autorest
cs
@@ -67,6 +67,9 @@ func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) { var fileSize uint64 ta := s.createTag(w, r) + if ta == nil { + return + } // Add the tag to the context r = r.WithContext(context.WithValue(r.Context(), tags.TagsContextKey{}, ta))
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package api import ( "bufio" "bytes" "context" "encoding/json" "errors" "fmt" "io" "io/ioutil" "mime" "mime/multipart" "net/http" "os" "strconv" "strings" "github.com/ethersphere/bee/pkg/collection/entry" "github.com/ethersphere/bee/pkg/encryption" "github.com/ethersphere/bee/pkg/file" "github.com/ethersphere/bee/pkg/file/joiner" "github.com/ethersphere/bee/pkg/file/splitter" "github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/tags" "github.com/gorilla/mux" ) const ( defaultBufSize = 4096 ) const ( multiPartFormData = "multipart/form-data" EncryptHeader = "swarm-encrypt" ) type targetsContextKey struct{} // fileUploadResponse is returned when an HTTP request to upload a file is successful type fileUploadResponse struct { Reference swarm.Address `json:"reference"` } // fileUploadHandler uploads the file and its metadata supplied as: // - multipart http message // - other content types as complete file body func (s *server) fileUploadHandler(w http.ResponseWriter, r *http.Request) { toEncrypt := strings.ToLower(r.Header.Get(EncryptHeader)) == "true" contentType := r.Header.Get("Content-Type") mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { s.Logger.Debugf("file upload: parse content type header %q: %v", contentType, err) s.Logger.Errorf("file upload: parse content type header %q", contentType) jsonhttp.BadRequest(w, "invalid content-type header") return } var reader io.Reader var fileName, contentLength string var fileSize uint64 ta := s.createTag(w, r) // Add the tag to the context r = r.WithContext(context.WithValue(r.Context(), tags.TagsContextKey{}, ta)) ctx := r.Context() if mediaType == multiPartFormData { mr := multipart.NewReader(r.Body, params["boundary"]) // read only the first part, as only one file upload is supported part, err := mr.NextPart() if err != nil { s.Logger.Debugf("file upload: read multipart: %v", err) s.Logger.Error("file upload: read multipart") jsonhttp.BadRequest(w, "invalid multipart/form-data") return } // try to find filename // 1) in part header params // 2) as formname // 3) file reference hash (after uploading the file) if fileName = part.FileName(); fileName == "" { fileName = part.FormName() } // then find out content type contentType = part.Header.Get("Content-Type") if contentType == "" { br := bufio.NewReader(part) buf, err := br.Peek(512) if err != nil && err != io.EOF { s.Logger.Debugf("file upload: read content type, file %q: %v", fileName, err) s.Logger.Errorf("file upload: read content type, file %q", fileName) jsonhttp.BadRequest(w, "error reading content type") return } contentType = http.DetectContentType(buf) reader = br } else { reader = part } contentLength = part.Header.Get("Content-Length") } else { fileName = r.URL.Query().Get("name") contentLength = r.Header.Get("Content-Length") reader = r.Body } if contentLength != "" { fileSize, err = strconv.ParseUint(contentLength, 10, 64) if err != nil { s.Logger.Debugf("file upload: content length, file %q: %v", fileName, err) s.Logger.Errorf("file upload: content length, file %q", fileName) jsonhttp.BadRequest(w, "invalid content length header") return } } else { // copy the part to a tmp file to get its size tmp, err := ioutil.TempFile("", "bee-multipart") if err != nil { s.Logger.Debugf("file upload: create 
temporary file: %v", err) s.Logger.Errorf("file upload: create temporary file") jsonhttp.InternalServerError(w, nil) return } defer os.Remove(tmp.Name()) defer tmp.Close() n, err := io.Copy(tmp, reader) if err != nil { s.Logger.Debugf("file upload: write temporary file: %v", err) s.Logger.Error("file upload: write temporary file") jsonhttp.InternalServerError(w, nil) return } if _, err := tmp.Seek(0, io.SeekStart); err != nil { s.Logger.Debugf("file upload: seek to beginning of temporary file: %v", err) s.Logger.Error("file upload: seek to beginning of temporary file") jsonhttp.InternalServerError(w, nil) return } fileSize = uint64(n) reader = tmp } // first store the file and get its reference sp := splitter.NewSimpleSplitter(s.Storer) fr, err := file.SplitWriteAll(ctx, sp, reader, int64(fileSize), toEncrypt) if err != nil { s.Logger.Debugf("file upload: file store, file %q: %v", fileName, err) s.Logger.Errorf("file upload: file store, file %q", fileName) jsonhttp.InternalServerError(w, "could not store file data") return } // If filename is still empty, use the file hash as the filename if fileName == "" { fileName = fr.String() } // then store the metadata and get its reference m := entry.NewMetadata(fileName) m.MimeType = contentType metadataBytes, err := json.Marshal(m) if err != nil { s.Logger.Debugf("file upload: metadata marshal, file %q: %v", fileName, err) s.Logger.Errorf("file upload: metadata marshal, file %q", fileName) jsonhttp.InternalServerError(w, "metadata marshal error") return } sp = splitter.NewSimpleSplitter(s.Storer) mr, err := file.SplitWriteAll(ctx, sp, bytes.NewReader(metadataBytes), int64(len(metadataBytes)), toEncrypt) if err != nil { s.Logger.Debugf("file upload: metadata store, file %q: %v", fileName, err) s.Logger.Errorf("file upload: metadata store, file %q", fileName) jsonhttp.InternalServerError(w, "could not store metadata") return } // now join both references (mr,fr) to create an entry and store it. 
entrie := entry.New(fr, mr) fileEntryBytes, err := entrie.MarshalBinary() if err != nil { s.Logger.Debugf("file upload: entry marshal, file %q: %v", fileName, err) s.Logger.Errorf("file upload: entry marshal, file %q", fileName) jsonhttp.InternalServerError(w, "entry marshal error") return } sp = splitter.NewSimpleSplitter(s.Storer) reference, err := file.SplitWriteAll(ctx, sp, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes)), toEncrypt) if err != nil { s.Logger.Debugf("file upload: entry store, file %q: %v", fileName, err) s.Logger.Errorf("file upload: entry store, file %q", fileName) jsonhttp.InternalServerError(w, "could not store entry") return } ta.DoneSplit(reference) w.Header().Set("ETag", fmt.Sprintf("%q", reference.String())) w.Header().Set(TagHeaderUid, fmt.Sprint(ta.Uid)) w.Header().Set("Access-Control-Expose-Headers", TagHeaderUid) jsonhttp.OK(w, fileUploadResponse{ Reference: reference, }) } // fileUploadInfo contains the data for a file to be uploaded type fileUploadInfo struct { name string // file name size int64 // file size contentType string reader io.Reader } // storeFile uploads the given file and returns its reference // this function was extracted from `fileUploadHandler` and should eventually replace its current code func storeFile(ctx context.Context, fileInfo *fileUploadInfo, s storage.Storer) (swarm.Address, error) { v := ctx.Value(toEncryptContextKey{}) toEncrypt, _ := v.(bool) // default is false // first store the file and get its reference sp := splitter.NewSimpleSplitter(s) fr, err := file.SplitWriteAll(ctx, sp, fileInfo.reader, fileInfo.size, toEncrypt) if err != nil { return swarm.ZeroAddress, fmt.Errorf("split file error: %w", err) } // if filename is still empty, use the file hash as the filename if fileInfo.name == "" { fileInfo.name = fr.String() } // then store the metadata and get its reference m := entry.NewMetadata(fileInfo.name) m.MimeType = fileInfo.contentType metadataBytes, err := json.Marshal(m) if err != nil { return swarm.ZeroAddress, fmt.Errorf("metadata marshal error: %w", err) } sp = splitter.NewSimpleSplitter(s) mr, err := file.SplitWriteAll(ctx, sp, bytes.NewReader(metadataBytes), int64(len(metadataBytes)), toEncrypt) if err != nil { return swarm.ZeroAddress, fmt.Errorf("split metadata error: %w", err) } // now join both references (mr, fr) to create an entry and store it e := entry.New(fr, mr) fileEntryBytes, err := e.MarshalBinary() if err != nil { return swarm.ZeroAddress, fmt.Errorf("entry marshal error: %w", err) } sp = splitter.NewSimpleSplitter(s) reference, err := file.SplitWriteAll(ctx, sp, bytes.NewReader(fileEntryBytes), int64(len(fileEntryBytes)), toEncrypt) if err != nil { return swarm.ZeroAddress, fmt.Errorf("split entry error: %w", err) } return reference, nil } // fileDownloadHandler downloads the file given the entry's reference. func (s *server) fileDownloadHandler(w http.ResponseWriter, r *http.Request) { addr := mux.Vars(r)["addr"] address, err := swarm.ParseHexAddress(addr) if err != nil { s.Logger.Debugf("file download: parse file address %s: %v", addr, err) s.Logger.Errorf("file download: parse file address %s", addr) jsonhttp.BadRequest(w, "invalid file address") return } toDecrypt := len(address.Bytes()) == (swarm.HashSize + encryption.KeyLength) targets := r.URL.Query().Get("targets") r = r.WithContext(context.WithValue(r.Context(), targetsContextKey{}, targets)) // read entry. 
j := joiner.NewSimpleJoiner(s.Storer) buf := bytes.NewBuffer(nil) _, err = file.JoinReadAll(r.Context(), j, address, buf, toDecrypt) if err != nil { s.Logger.Debugf("file download: read entry %s: %v", addr, err) s.Logger.Errorf("file download: read entry %s", addr) jsonhttp.NotFound(w, nil) return } e := &entry.Entry{} err = e.UnmarshalBinary(buf.Bytes()) if err != nil { s.Logger.Debugf("file download: unmarshal entry %s: %v", addr, err) s.Logger.Errorf("file download: unmarshal entry %s", addr) jsonhttp.InternalServerError(w, "error unmarshaling entry") return } // If none match header is set always send the reply as not modified // TODO: when SOC comes, we need to revisit this concept noneMatchEtag := r.Header.Get("If-None-Match") if noneMatchEtag != "" { if e.Reference().Equal(address) { w.WriteHeader(http.StatusNotModified) return } } // Read metadata. buf = bytes.NewBuffer(nil) _, err = file.JoinReadAll(r.Context(), j, e.Metadata(), buf, toDecrypt) if err != nil { s.Logger.Debugf("file download: read metadata %s: %v", addr, err) s.Logger.Errorf("file download: read metadata %s", addr) jsonhttp.NotFound(w, nil) return } metaData := &entry.Metadata{} err = json.Unmarshal(buf.Bytes(), metaData) if err != nil { s.Logger.Debugf("file download: unmarshal metadata %s: %v", addr, err) s.Logger.Errorf("file download: unmarshal metadata %s", addr) jsonhttp.InternalServerError(w, "error unmarshaling metadata") return } additionalHeaders := http.Header{ "Content-Disposition": {fmt.Sprintf("inline; filename=\"%s\"", metaData.Filename)}, "Content-Type": {metaData.MimeType}, } s.downloadHandler(w, r, e.Reference(), additionalHeaders) } // downloadHandler contains common logic for dowloading Swarm file from API func (s *server) downloadHandler( w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, ) { targets := r.URL.Query().Get("targets") r = r.WithContext(context.WithValue(r.Context(), targetsContextKey{}, targets)) ctx := r.Context() toDecrypt := len(reference.Bytes()) == (swarm.HashSize + encryption.KeyLength) j := joiner.NewSimpleJoiner(s.Storer) // send the file data back in the response dataSize, err := j.Size(ctx, reference) if err != nil { if errors.Is(err, storage.ErrNotFound) { s.Logger.Debugf("api download: not found %s: %v", reference, err) s.Logger.Error("api download: not found") jsonhttp.NotFound(w, "not found") return } s.Logger.Debugf("api download: invalid root chunk %s: %v", reference, err) s.Logger.Error("api download: invalid root chunk") jsonhttp.BadRequest(w, "invalid root chunk") return } pr, pw := io.Pipe() defer pr.Close() go func() { ctx := r.Context() <-ctx.Done() if err := ctx.Err(); err != nil { if err := pr.CloseWithError(err); err != nil { s.Logger.Debugf("api download: data join close %s: %v", reference, err) s.Logger.Errorf("api download: data join close %s", reference) } } }() go func() { _, err := file.JoinReadAll(r.Context(), j, reference, pw, toDecrypt) if err := pw.CloseWithError(err); err != nil { s.Logger.Debugf("api download: data join close %s: %v", reference, err) s.Logger.Errorf("api download: data join close %s", reference) } }() bpr := bufio.NewReader(pr) if b, err := bpr.Peek(defaultBufSize); err != nil && err != io.EOF && len(b) == 0 { s.Logger.Debugf("api download: data join %s: %v", reference, err) s.Logger.Errorf("api download: data join %s", reference) jsonhttp.NotFound(w, nil) return } // include additional headers for name, values := range additionalHeaders { var v string for _, value := range values { 
if v != "" { v += "; " } v += value } w.Header().Set(name, v) } w.Header().Set("ETag", fmt.Sprintf("%q", reference)) w.Header().Set("Content-Length", fmt.Sprintf("%d", dataSize)) w.Header().Set("Decompressed-Content-Length", fmt.Sprintf("%d", dataSize)) w.Header().Set(TargetsRecoveryHeader, targets) if _, err = io.Copy(w, bpr); err != nil { s.Logger.Debugf("api download: data read %s: %v", reference, err) s.Logger.Errorf("api download: data read %s", reference) } }
1
11689
I would skip this check or add an internal server error response.
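A minimal sketch of the second option, returning an internal server error instead of aborting silently; `createTag` and `tag` here are hypothetical stand-ins (in the real handler this would presumably go through `jsonhttp.InternalServerError`, like the other error paths in this file):

```go
package main

import "net/http"

// tag is a hypothetical stand-in for the tags.Tag type.
type tag struct{ Uid uint32 }

// createTag is a stand-in for s.createTag, which may fail and return nil.
func createTag(w http.ResponseWriter, r *http.Request) *tag { return nil }

func fileUploadHandler(w http.ResponseWriter, r *http.Request) {
	ta := createTag(w, r)
	if ta == nil {
		// Make the failure visible to the client rather than returning silently.
		http.Error(w, "cannot create tag", http.StatusInternalServerError)
		return
	}
	// ... proceed with the upload using ta ...
}

func main() {
	http.HandleFunc("/files", fileUploadHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```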
ethersphere-bee
go
@@ -7,6 +7,12 @@ const createDist = (buildConfig = config.defaultBuildConfig, options) => { config.buildConfig = buildConfig config.update(options) + if (config.notarize) { + notarize = config.notarize + notary_user = config.notary_user + notary_password = config.notary_password + } + util.updateBranding() fs.removeSync(path.join(config.outputDir, 'dist')) config.buildTarget = 'create_dist'
1
const config = require('../lib/config') const util = require('../lib/util') const path = require('path') const fs = require('fs-extra') const createDist = (buildConfig = config.defaultBuildConfig, options) => { config.buildConfig = buildConfig config.update(options) util.updateBranding() fs.removeSync(path.join(config.outputDir, 'dist')) config.buildTarget = 'create_dist' util.buildTarget() } module.exports = createDist
1
6073
There is a missing `{` here (and a matching `}` after `notary_password = config.notary_password`); without the braces, the `if` only applies to the first assignment.
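A runnable sketch of the point (the `config` object below is a hypothetical stand-in): without the braces, only the first assignment would be governed by the `if`; with them, all three are.

```js
// Hypothetical config values, just to make the snippet runnable.
const config = { notarize: true, notary_user: 'user', notary_password: 'secret' }

let notarize, notary_user, notary_password

// With braces, all three assignments are conditional on config.notarize.
if (config.notarize) {
  notarize = config.notarize
  notary_user = config.notary_user
  notary_password = config.notary_password
}

console.log(notarize, notary_user, notary_password)
```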
brave-brave-browser
js
@@ -538,11 +538,10 @@ func TestDdevImportDB(t *testing.T) { assert.True(settingsHashSalt) case "wordpress": // nolint: vetshadow - hasAuthSalt, err := fileutil.FgrepStringInFile(app.SiteSettingsPath, "SECURE_AUTH_SALT") + hasAuthSalt, err := fileutil.FgrepStringInFile(app.SiteLocalSettingsPath, "SECURE_AUTH_SALT") assert.NoError(err) assert.True(hasAuthSalt) } - } if site.DBTarURL != "" {
1
package ddevapp_test import ( "fmt" "io/ioutil" "os" "path" "path/filepath" "regexp" "runtime" "strconv" "strings" "testing" "time" "github.com/drud/ddev/pkg/archive" "github.com/drud/ddev/pkg/ddevapp" "github.com/drud/ddev/pkg/dockerutil" "github.com/drud/ddev/pkg/exec" "github.com/drud/ddev/pkg/fileutil" "github.com/drud/ddev/pkg/output" "github.com/drud/ddev/pkg/testcommon" "github.com/drud/ddev/pkg/util" "github.com/drud/ddev/pkg/version" "github.com/fsouza/go-dockerclient" "github.com/google/uuid" "github.com/lunixbochs/vtclean" log "github.com/sirupsen/logrus" asrt "github.com/stretchr/testify/assert" ) var ( TestSites = []testcommon.TestSite{ { Name: "TestPkgWordpress", SourceURL: "https://github.com/drud/wordpress/archive/v0.4.0.tar.gz", ArchiveInternalExtractionPath: "wordpress-0.4.0/", FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.0/wordpress_files.tar.gz", DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.0/wordpress_db.tar.gz", Docroot: "htdocs", Type: "wordpress", Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/readme.html", Expect: "Welcome. WordPress is a very special project to me."}, DynamicURI: testcommon.URIWithExpect{URI: "/", Expect: "this post has a photo"}, FilesImageURI: "/wp-content/uploads/2017/04/pexels-photo-265186-1024x683.jpeg", }, { Name: "TestPkgDrupal8", SourceURL: "https://ftp.drupal.org/files/projects/drupal-8.6.1.tar.gz", ArchiveInternalExtractionPath: "drupal-8.6.1/", FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/drupal8_6_1_files.tar.gz", FilesZipballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.0/drupal8_files.zip", DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/drupal8_6_1_db.tar.gz", DBZipURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.0/drupal8_db.zip", FullSiteTarballURL: "", Type: "drupal8", Docroot: "", Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.txt", Expect: "Drupal is an open source content management platform"}, DynamicURI: testcommon.URIWithExpect{URI: "/node/1", Expect: "this is a post with an image"}, FilesImageURI: "/sites/default/files//2017-04/pexels-photo-265186.jpeg", }, { Name: "TestPkgDrupal7", // Drupal Kickstart on D7 SourceURL: "https://ftp.drupal.org/files/projects/drupal-7.59.tar.gz", ArchiveInternalExtractionPath: "drupal-7.59/", FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d7test-7.59.files.tar.gz", DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d7test-7.59-db.tar.gz", FullSiteTarballURL: "", Docroot: "", Type: "drupal7", Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.txt", Expect: "Drupal is an open source content management platform"}, DynamicURI: testcommon.URIWithExpect{URI: "/node/1", Expect: "D7 test project, kittens edition"}, FilesImageURI: "/sites/default/files/field/image/kittens-large.jpg", FullSiteArchiveExtPath: "docroot/sites/default/files", }, { Name: "TestPkgDrupal6", SourceURL: "https://ftp.drupal.org/files/projects/drupal-6.38.tar.gz", ArchiveInternalExtractionPath: "drupal-6.38/", DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/drupal6.38_db.tar.gz", FullSiteTarballURL: "", FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/drupal6_files.tar.gz", Docroot: "", Type: "drupal6", Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/CHANGELOG.txt", 
Expect: "Drupal 6.38, 2016-02-24"}, DynamicURI: testcommon.URIWithExpect{URI: "/node/2", Expect: "This is a story. The story is somewhat shaky"}, FilesImageURI: "/sites/default/files/garland_logo.jpg", }, { Name: "TestPkgBackdrop", SourceURL: "https://github.com/backdrop/backdrop/archive/1.11.0.tar.gz", ArchiveInternalExtractionPath: "backdrop-1.11.0/", DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/backdrop_db.11.0.tar.gz", FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/backdrop_files.11.0.tar.gz", FullSiteTarballURL: "", Docroot: "", Type: "backdrop", Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.md", Expect: "Backdrop is a full-featured content management system"}, DynamicURI: testcommon.URIWithExpect{URI: "/posts/first-post-all-about-kittens", Expect: "Lots of kittens are a good thing"}, FilesImageURI: "/files/styles/large/public/field/image/kittens-large.jpg", }, { Name: "TestPkgTypo3", SourceURL: "https://github.com/drud/typo3-v9-test/archive/v0.2.2.tar.gz", ArchiveInternalExtractionPath: "typo3-v9-test-0.2.2/", DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/typo3_v9.5_introduction_db.tar.gz", FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/typo3_v9.5_introduction_files.tar.gz", FullSiteTarballURL: "", Docroot: "public", Type: "typo3", Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.txt", Expect: "junk readme simply for reading"}, DynamicURI: testcommon.URIWithExpect{URI: "/index.php?id=65", Expect: "Boxed Content"}, FilesImageURI: "/fileadmin/introduction/images/streets/nikita-maru-70928.jpg", }, } FullTestSites = TestSites ) func TestMain(m *testing.M) { output.LogSetUp() // Ensure the ddev directory is created before tests run. _ = util.GetGlobalDdevDir() // Since this may be first time ddev has been used, we need the // ddev_default network available. dockerutil.EnsureDdevNetwork() // Avoid having sudo try to add to /etc/hosts. // This is normally done by Testsite.Prepare() _ = os.Setenv("DRUD_NONINTERACTIVE", "true") // Attempt to remove all running containers before starting a test. // If no projects are running, this will exit silently and without error. // If a system doesn't have `ddev` in its $PATH, this will emit a warning but will not fail the test. if _, err := exec.RunCommand("ddev", []string{"remove", "--all"}); err != nil { log.Warnf("Failed to remove all running projects: %v", err) } count := len(ddevapp.GetApps()) if count > 0 { log.Fatalf("ddevapp tests require no projects running. You have %v project(s) running.", count) } // If GOTEST_SHORT is an integer, then use it as index for a single usage // in the array. Any value can be used, it will default to just using the // first site in the array. gotestShort := os.Getenv("GOTEST_SHORT") if gotestShort != "" { useSite := 0 if site, err := strconv.Atoi(gotestShort); err == nil && site >= 0 && site < len(TestSites) { useSite = site } TestSites = []testcommon.TestSite{TestSites[useSite]} } // testRun is the exit result we'll provide. // Start with a clean exit result, it will be changed if we have trouble. 
testRun := 0 for i := range TestSites { err := TestSites[i].Prepare() if err != nil { log.Fatalf("Prepare() failed on TestSite.Prepare() site=%s, err=%v", TestSites[i].Name, err) } switchDir := TestSites[i].Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s Start", TestSites[i].Name)) testcommon.ClearDockerEnv() app := &ddevapp.DdevApp{} err = app.Init(TestSites[i].Dir) if err != nil { testRun = -1 log.Errorf("TestMain startup: app.Init() failed on site %s in dir %s, err=%v", TestSites[i].Name, TestSites[i].Dir, err) continue } runTime() switchDir() } if testRun == 0 { log.Debugln("Running tests.") testRun = m.Run() } for i, site := range TestSites { runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s Remove", site.Name)) testcommon.ClearDockerEnv() app := &ddevapp.DdevApp{} err := app.Init(site.Dir) if err != nil { log.Fatalf("TestMain shutdown: app.Init() failed on site %s in dir %s, err=%v", TestSites[i].Name, TestSites[i].Dir, err) } if app.SiteStatus() != ddevapp.SiteNotFound { err = app.Down(true, false) if err != nil { log.Fatalf("TestMain shutdown: app.Down() failed on site %s, err=%v", TestSites[i].Name, err) } } runTime() site.Cleanup() } os.Exit(testRun) } // TestDdevStart tests the functionality that is called when "ddev start" is executed func TestDdevStart(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} // Make sure this leaves us in the original test directory testDir, _ := os.Getwd() //nolint: errcheck defer os.Chdir(testDir) for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevStart", site.Name)) err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) // ensure docker-compose.yaml exists inside .ddev site folder composeFile := fileutil.FileExists(app.DockerComposeYAMLPath()) assert.True(composeFile) for _, containerType := range [3]string{"web", "db", "dba"} { //nolint: vetshadow containerName, err := constructContainerName(containerType, app) assert.NoError(err) check, err := testcommon.ContainerCheck(containerName, "running") assert.NoError(err) assert.True(check, "Container check on %s failed", containerType) } err = app.Down(true, false) assert.NoError(err) runTime() switchDir() } // Start up TestSites[0] again site := TestSites[0] err := os.Chdir(site.Dir) assert.NoError(err) err = app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) // try to start a site of same name at different path another := TestSites[0] err = another.Prepare() if err != nil { assert.FailNow("TestDdevStart: Prepare() failed on another.Prepare(), err=%v", err) return } badapp := &ddevapp.DdevApp{} err = badapp.Init(another.Dir) assert.Error(err) if err != nil { assert.Contains(err.Error(), fmt.Sprintf("a project (web container) in running state already exists for %s that was created at %s", TestSites[0].Name, TestSites[0].Dir)) } // Make sure that GetActiveApp() also fails when trying to start app of duplicate name in current directory. 
switchDir := another.Chdir() _, err = ddevapp.GetActiveApp("") assert.Error(err) if err != nil { assert.Contains(err.Error(), fmt.Sprintf("a project (web container) in running state already exists for %s that was created at %s", TestSites[0].Name, TestSites[0].Dir)) } testcommon.CleanupDir(another.Dir) switchDir() // Clean up site 0 err = app.Down(true, false) assert.NoError(err) } // TestDdevStartMultipleHostnames tests start with multiple hostnames func TestDdevStartMultipleHostnames(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevStartMultipleHostnames", site.Name)) testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) // site.Name is explicitly added because if not removed in GetHostNames() it will cause ddev-router failure // "a" is repeated for the same reason; a user error of this type should not cause a failure; GetHostNames() // should uniqueify them. app.AdditionalHostnames = []string{"sub1." + site.Name, "sub2." + site.Name, "subname.sub3." + site.Name, site.Name, site.Name, site.Name} // sub1.<sitename>.ddev.local and sitename.ddev.local are deliberately included to prove they don't // cause ddev-router failures" app.AdditionalFQDNs = []string{"one.example.com", "two.example.com", "a.one.example.com", site.Name + "." + version.DDevTLD, "sub1." + site.Name + version.DDevTLD} err = app.WriteConfig() assert.NoError(err) err = app.Start() assert.NoError(err) if err != nil && strings.Contains(err.Error(), "db container failed") { stdout := testcommon.CaptureUserOut() err = app.Logs("db", false, false, "") assert.NoError(err) out := stdout() t.Logf("DB Logs after app.Start: \n%s\n=== END DB LOGS ===", out) } // ensure docker-compose.yaml exists inside .ddev site folder composeFile := fileutil.FileExists(app.DockerComposeYAMLPath()) assert.True(composeFile) for _, containerType := range [3]string{"web", "db", "dba"} { //nolint: vetshadow containerName, err := constructContainerName(containerType, app) assert.NoError(err) check, err := testcommon.ContainerCheck(containerName, "running") assert.NoError(err) assert.True(check, "Container check on %s failed", containerType) } for _, hostname := range app.GetHostnames() { testcommon.EnsureLocalHTTPContent(t, "http://"+hostname+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect) testcommon.EnsureLocalHTTPContent(t, "https://"+hostname+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect) } // Multiple projects can't run at the same time with the fqdns, so we need to clean // up these for tests that run later. app.AdditionalFQDNs = []string{} app.AdditionalHostnames = []string{} err = app.WriteConfig() assert.NoError(err) err = app.Down(true, false) assert.NoError(err) runTime() } } // TestDdevXdebugEnabled tests running with xdebug_enabled = true, etc. 
func TestDdevXdebugEnabled(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} site := TestSites[0] runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevXdebugEnabled", site.Name)) err := app.Init(site.Dir) assert.NoError(err) // Run with xdebug_enabled: false testcommon.ClearDockerEnv() app.XdebugEnabled = false err = app.WriteConfig() assert.NoError(err) err = app.Start() assert.NoError(err) stdout, _, err := app.Exec("web", "php", "--ri", "xdebug") assert.Error(err) assert.Contains(stdout, "Extension 'xdebug' not present") // Run with xdebug_enabled: true //err = app.Stop() testcommon.ClearDockerEnv() app.XdebugEnabled = true err = app.WriteConfig() assert.NoError(err) err = app.Start() assert.NoError(err) stdout, _, err = app.Exec("web", "php", "--ri", "xdebug") assert.NoError(err) assert.Contains(stdout, "xdebug support => enabled") assert.Contains(stdout, "xdebug.remote_host => host.docker.internal => host.docker.internal") err = app.Down(true, false) assert.NoError(err) runTime() } // TestDdevMysqlWorks tests that mysql client can be run in both containers. func TestDdevMysqlWorks(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} site := TestSites[0] runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevMysqlWorks", site.Name)) err := app.Init(site.Dir) assert.NoError(err) testcommon.ClearDockerEnv() err = app.Start() assert.NoError(err) // Test that mysql + .my.cnf works on web container _, _, err = app.Exec("web", "bash", "-c", "mysql -e 'SELECT USER();' | grep 'db@'") assert.NoError(err) _, _, err = app.Exec("web", "bash", "-c", "mysql -e 'SELECT DATABASE();' | grep 'db'") assert.NoError(err) // Test that mysql + .my.cnf works on db container _, _, err = app.Exec("db", "bash", "-c", "mysql -e 'SELECT USER();' | grep 'root@localhost'") assert.NoError(err) _, _, err = app.Exec("db", "bash", "-c", "mysql -e 'SELECT DATABASE();' | grep 'db'") assert.NoError(err) err = app.Down(true, false) assert.NoError(err) runTime() } // TestStartWithoutDdev makes sure we don't have a regression where lack of .ddev // causes a panic. func TestStartWithoutDdevConfig(t *testing.T) { // Set up tests and give ourselves a working directory. assert := asrt.New(t) testDir := testcommon.CreateTmpDir("TestStartWithoutDdevConfig") // testcommon.Chdir()() and CleanupDir() check their own errors (and exit) defer testcommon.CleanupDir(testDir) defer testcommon.Chdir(testDir)() err := os.MkdirAll(testDir+"/sites/default", 0777) assert.NoError(err) err = os.Chdir(testDir) assert.NoError(err) _, err = ddevapp.GetActiveApp("") assert.Error(err) if err != nil { assert.Contains(err.Error(), "Could not find a project") } } // TestGetApps tests the GetApps function to ensure it accurately returns a list of running applications. func TestGetApps(t *testing.T) { assert := asrt.New(t) // Start the apps. for _, site := range TestSites { testcommon.ClearDockerEnv() app := &ddevapp.DdevApp{} err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) } apps := ddevapp.GetApps() assert.Equal(len(apps), len(TestSites)) for _, testSite := range TestSites { var found bool for _, app := range apps { if testSite.Name == app.GetName() { found = true break } } assert.True(found, "Found testSite %s in list", testSite.Name) } // Now shut down all sites as we expect them to be shut down. 
for _, site := range TestSites { testcommon.ClearDockerEnv() app := &ddevapp.DdevApp{} err := app.Init(site.Dir) assert.NoError(err) err = app.Down(true, false) assert.NoError(err) } } // TestDdevImportDB tests the functionality that is called when "ddev import-db" is executed func TestDdevImportDB(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} testDir, _ := os.Getwd() for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevImportDB", site.Name)) testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) // Test simple db loads. for _, file := range []string{"users.sql", "users.mysql", "users.sql.gz", "users.mysql.gz", "users.sql.tar", "users.mysql.tar", "users.sql.tar.gz", "users.mysql.tar.gz", "users.sql.tgz", "users.mysql.tgz", "users.sql.zip", "users.mysql.zip"} { path := filepath.Join(testDir, "testdata", file) err = app.ImportDB(path, "") assert.NoError(err, "Failed to app.ImportDB path: %s err: %v", path, err) // Test that a settings file has correct hash_salt format switch app.Type { case "drupal7": // nolint: vetshadow drupalHashSalt, err := fileutil.FgrepStringInFile(app.SiteLocalSettingsPath, "$drupal_hash_salt") assert.NoError(err) assert.True(drupalHashSalt) case "drupal8": // nolint: vetshadow settingsHashSalt, err := fileutil.FgrepStringInFile(app.SiteLocalSettingsPath, "settings['hash_salt']") assert.NoError(err) assert.True(settingsHashSalt) case "wordpress": // nolint: vetshadow hasAuthSalt, err := fileutil.FgrepStringInFile(app.SiteSettingsPath, "SECURE_AUTH_SALT") assert.NoError(err) assert.True(hasAuthSalt) } } if site.DBTarURL != "" { // nolint: vetshadow _, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_siteTarArchive", "", site.DBTarURL) assert.NoError(err) err = app.ImportDB(cachedArchive, "") assert.NoError(err) out, _, err := app.Exec("db", "mysql", "-e", "SHOW TABLES;") assert.NoError(err) assert.Contains(out, "Tables_in_db") assert.False(strings.Contains(out, "Empty set")) assert.NoError(err) } if site.DBZipURL != "" { // nolint: vetshadow _, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_siteZipArchive", "", site.DBZipURL) assert.NoError(err) err = app.ImportDB(cachedArchive, "") assert.NoError(err) out, _, err := app.Exec("db", "mysql", "-e", "SHOW TABLES;") assert.NoError(err) assert.Contains(out, "Tables_in_db") assert.False(strings.Contains(out, "Empty set")) } if site.FullSiteTarballURL != "" { // nolint: vetshadow _, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_FullSiteTarballURL", "", site.FullSiteTarballURL) assert.NoError(err) err = app.ImportDB(cachedArchive, "data.sql") assert.NoError(err, "Failed to find data.sql at root of tarball %s", cachedArchive) } // We don't want all the projects running at once. 
err = app.Down(true, false) assert.NoError(err) runTime() switchDir() } } // TestDdevFullSiteSetup tests a full import-db and import-files and then looks to see if // we have a spot-test success hit on a URL func TestDdevFullSiteSetup(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevFullSiteSetup", site.Name)) testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) if site.DBTarURL != "" { // nolint: vetshadow _, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_siteTarArchive", "", site.DBTarURL) assert.NoError(err) err = app.ImportDB(cachedArchive, "") assert.NoError(err) } if site.FilesTarballURL != "" { // nolint: vetshadow _, tarballPath, err := testcommon.GetCachedArchive(site.Name, "local-tarballs-files", "", site.FilesTarballURL) assert.NoError(err) err = app.ImportFiles(tarballPath, "") assert.NoError(err) } // Test static content. testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL()+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect) // Test dynamic php + database content. //testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL()+site.DynamicURI.URI, site.DynamicURI.Expect) rawurl := app.GetHTTPURL() + site.DynamicURI.URI body, resp, err := testcommon.GetLocalHTTPResponse(t, rawurl, 40) assert.NoError(err, "GetLocalHTTPResponse returned err on rawurl %s, resp=$v: %v", rawurl, resp, err) if err != nil { stdout := testcommon.CaptureUserOut() err = app.Logs("web", false, false, "") assert.NoError(err) out := stdout() t.Logf("Logs after GetLocalHTTPResponse: %s", out) } assert.Contains(body, site.DynamicURI.Expect) // Load an image from the files section if site.FilesImageURI != "" { // nolint: vetshadow _, resp, err := testcommon.GetLocalHTTPResponse(t, app.GetHTTPURL()+site.FilesImageURI) assert.NoError(err) assert.Equal("image/jpeg", resp.Header["Content-Type"][0]) } // We don't want all the projects running at once. err = app.Down(true, false) assert.NoError(err) runTime() switchDir() } } // TestDdevRestoreSnapshot tests creating a snapshot and reverting to it func TestDdevRestoreSnapshot(t *testing.T) { assert := asrt.New(t) testDir, _ := os.Getwd() app := &ddevapp.DdevApp{} runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("TestDdevRestoreSnapshot")) d7testerTest1Dump, err := filepath.Abs(filepath.Join("testdata", "restore_snapshot", "d7tester_test_1.sql.gz")) assert.NoError(err) d7testerTest2Dump, err := filepath.Abs(filepath.Join("testdata", "restore_snapshot", "d7tester_test_2.sql.gz")) assert.NoError(err) // Use d7 only for this test, the key thing is the database interaction site := FullTestSites[2] // If running this with GOTEST_SHORT we have to create the directory, tarball etc. 
if site.Dir == "" || !fileutil.FileExists(site.Dir) { err = site.Prepare() if err != nil { t.Fatalf("Prepare() failed on TestSite.Prepare() site=%s, err=%v", site.Name, err) } } switchDir := site.Chdir() testcommon.ClearDockerEnv() err = app.Init(site.Dir) if err != nil { if app.SiteStatus() != ddevapp.SiteRunning { t.Fatalf("app.Init() failed on site %s in dir %s, err=%v", site.Name, site.Dir, err) } } err = app.Start() if err != nil { t.Fatalf("TestMain startup: app.Start() failed on site %s, err=%v", site.Name, err) } err = app.ImportDB(d7testerTest1Dump, "") assert.NoError(err, "Failed to app.ImportDB path: %s err: %v", d7testerTest1Dump, err) testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL(), "d7 tester test 1 has 1 node") // Make a snapshot of d7 tester test 1 backupsDir := filepath.Join(app.GetConfigPath(""), "db_snapshots") snapshotName, err := app.SnapshotDatabase("d7testerTest1") assert.NoError(err) assert.EqualValues(snapshotName, "d7testerTest1") assert.True(fileutil.FileExists(filepath.Join(backupsDir, snapshotName, "xtrabackup_info"))) err = app.ImportDB(d7testerTest2Dump, "") assert.NoError(err, "Failed to app.ImportDB path: %s err: %v", d7testerTest2Dump, err) testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL(), "d7 tester test 2 has 2 nodes") snapshotName, err = app.SnapshotDatabase("d7testerTest2") assert.NoError(err) assert.EqualValues(snapshotName, "d7testerTest2") assert.True(fileutil.FileExists(filepath.Join(backupsDir, snapshotName, "xtrabackup_info"))) err = app.RestoreSnapshot("d7testerTest1") assert.NoError(err) testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL(), "d7 tester test 1 has 1 node") err = app.RestoreSnapshot("d7testerTest2") assert.NoError(err) //testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL(), "d7 tester test 2 has 2 nodes") body, resp, err := testcommon.GetLocalHTTPResponse(t, app.GetHTTPURL(), 40) assert.NoError(err, "GetLocalHTTPResponse returned err on rawurl %s: %v", app.GetHTTPURL(), err) assert.Contains(body, "d7 tester test 2 has 2 nodes") if err != nil { t.Logf("resp after timeout: %v", resp) stdout := testcommon.CaptureUserOut() err = app.Logs("web", false, false, "") assert.NoError(err) out := stdout() t.Logf("web container logs after timeout: %s", out) } // Attempt a restore with a pre-mariadb_10.2 snapshot. It should fail and give a link. oldSnapshotTarball, err := filepath.Abs(filepath.Join(testDir, "testdata", "restore_snapshot", "d7tester_test_1.snapshot_mariadb_10_1.tgz")) assert.NoError(err) err = archive.Untar(oldSnapshotTarball, filepath.Join(site.Dir, ".ddev", "db_snapshots", "oldsnapshot"), "") assert.NoError(err) err = app.RestoreSnapshot("oldsnapshot") assert.Error(err) assert.Contains(err.Error(), "is not compatible with this version of ddev and mariadb") err = app.Down(true, false) assert.NoError(err) // TODO: Check behavior of ddev rm with snapshot, see if it has right stuff in it. runTime() switchDir() } // TestWriteableFilesDirectory tests to make sure that files created on host are writable on container // and files ceated in container are correct user on host. 
func TestWriteableFilesDirectory(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevImportDB", site.Name)) testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) uploadDir := app.GetUploadDir() if uploadDir != "" { // Use exec to touch a file in the container and see what the result is. Make sure it comes out with ownership // making it writeable on the host. filename := fileutil.RandomFilenameBase() dirname := fileutil.RandomFilenameBase() // Use path.Join for items on th container (linux) and filepath.Join for items on the host. inContainerDir := path.Join(uploadDir, dirname) onHostDir := filepath.Join(app.Docroot, inContainerDir) inContainerRelativePath := path.Join(inContainerDir, filename) onHostRelativePath := path.Join(onHostDir, filename) err = os.MkdirAll(onHostDir, 0775) assert.NoError(err) _, _, err = app.Exec("web", "sh", "-c", "echo 'content created inside container\n' >"+inContainerRelativePath) assert.NoError(err) // Now try to append to the file on the host. // os.OpenFile() for append here fails if the file does not already exist. //nolint: vetshadow f, err := os.OpenFile(onHostRelativePath, os.O_APPEND|os.O_WRONLY, 0660) assert.NoError(err) _, err = f.WriteString("this addition to the file was added on the host side") assert.NoError(err) _ = f.Close() // Create a file on the host and see what the result is. Make sure we can not append/write to it in the container. filename = fileutil.RandomFilenameBase() dirname = fileutil.RandomFilenameBase() inContainerDir = path.Join(uploadDir, dirname) onHostDir = filepath.Join(app.Docroot, inContainerDir) inContainerRelativePath = path.Join(inContainerDir, filename) onHostRelativePath = filepath.Join(onHostDir, filename) err = os.MkdirAll(onHostDir, 0775) assert.NoError(err) f, err = os.OpenFile(onHostRelativePath, os.O_CREATE|os.O_RDWR, 0660) assert.NoError(err) _, err = f.WriteString("this base content was inserted on the host side\n") assert.NoError(err) _ = f.Close() // if the file exists, add to it. We don't want to add if it's not already there. _, _, err = app.Exec("web", "sh", "-c", "if [ -f "+inContainerRelativePath+" ]; then echo 'content added inside container\n' >>"+inContainerRelativePath+"; fi") assert.NoError(err) // grep the file for both the content added on host and that added in container. 
_, _, err = app.Exec("web", "sh", "-c", "grep 'base content was inserted on the host' "+inContainerRelativePath+"&& grep 'content added inside container' "+inContainerRelativePath) assert.NoError(err) } err = app.Down(true, false) assert.NoError(err) runTime() switchDir() } } // TestDdevImportFilesDir tests that "ddev import-files" can successfully import non-archive directories func TestDdevImportFilesDir(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} // Create a dummy directory to test non-archive imports importDir, err := ioutil.TempDir("", t.Name()) assert.NoError(err) fileNames := make([]string, 0) for i := 0; i < 5; i++ { fileName := uuid.New().String() fileNames = append(fileNames, fileName) fullPath := filepath.Join(importDir, fileName) err = ioutil.WriteFile(fullPath, []byte(fileName), 0644) assert.NoError(err) } for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name())) testcommon.ClearDockerEnv() err = app.Init(site.Dir) assert.NoError(err) // Function under test err = app.ImportFiles(importDir, "") assert.NoError(err, "Importing a directory returned an error:", err) // Confirm contents of destination dir after import absUploadDir := filepath.Join(app.AppRoot, app.Docroot, app.GetUploadDir()) uploadedFiles, err := ioutil.ReadDir(absUploadDir) assert.NoError(err) uploadedFilesMap := map[string]bool{} for _, uploadedFile := range uploadedFiles { uploadedFilesMap[filepath.Base(uploadedFile.Name())] = true } for _, expectedFile := range fileNames { assert.True(uploadedFilesMap[expectedFile], "Expected file %s not found for site: %s", expectedFile, site.Name) } runTime() switchDir() } } // TestDdevImportFiles tests the functionality that is called when "ddev import-files" is executed func TestDdevImportFiles(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name())) testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) if site.FilesTarballURL != "" { _, tarballPath, err := testcommon.GetCachedArchive(site.Name, "local-tarballs-files", "", site.FilesTarballURL) assert.NoError(err) err = app.ImportFiles(tarballPath, "") assert.NoError(err) } if site.FilesZipballURL != "" { _, zipballPath, err := testcommon.GetCachedArchive(site.Name, "local-zipballs-files", "", site.FilesZipballURL) assert.NoError(err) err = app.ImportFiles(zipballPath, "") assert.NoError(err) } if site.FullSiteTarballURL != "" && site.FullSiteArchiveExtPath != "" { _, siteTarPath, err := testcommon.GetCachedArchive(site.Name, "local-site-tar", "", site.FullSiteTarballURL) assert.NoError(err) err = app.ImportFiles(siteTarPath, site.FullSiteArchiveExtPath) assert.NoError(err) } runTime() switchDir() } } // TestDdevImportFilesCustomUploadDir ensures that files are imported to a custom upload directory when requested func TestDdevImportFilesCustomUploadDir(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name())) testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) // Set custom upload dir app.UploadDir = "my/upload/dir" absUploadDir := filepath.Join(app.AppRoot, app.Docroot, app.UploadDir) err = os.MkdirAll(absUploadDir, 0755) assert.NoError(err) if site.FilesTarballURL != "" { _, 
tarballPath, err := testcommon.GetCachedArchive(site.Name, "local-tarballs-files", "", site.FilesTarballURL) assert.NoError(err) err = app.ImportFiles(tarballPath, "") assert.NoError(err) // Ensure upload dir isn't empty fileInfoSlice, err := ioutil.ReadDir(absUploadDir) assert.NoError(err) assert.NotEmpty(fileInfoSlice) } if site.FilesZipballURL != "" { _, zipballPath, err := testcommon.GetCachedArchive(site.Name, "local-zipballs-files", "", site.FilesZipballURL) assert.NoError(err) err = app.ImportFiles(zipballPath, "") assert.NoError(err) // Ensure upload dir isn't empty fileInfoSlice, err := ioutil.ReadDir(absUploadDir) assert.NoError(err) assert.NotEmpty(fileInfoSlice) } if site.FullSiteTarballURL != "" && site.FullSiteArchiveExtPath != "" { _, siteTarPath, err := testcommon.GetCachedArchive(site.Name, "local-site-tar", "", site.FullSiteTarballURL) assert.NoError(err) err = app.ImportFiles(siteTarPath, site.FullSiteArchiveExtPath) assert.NoError(err) // Ensure upload dir isn't empty fileInfoSlice, err := ioutil.ReadDir(absUploadDir) assert.NoError(err) assert.NotEmpty(fileInfoSlice) } runTime() switchDir() } } // TestDdevExec tests the execution of commands inside a docker container of a site. func TestDdevExec(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevExec", site.Name)) err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) out, _, err := app.Exec("web", "pwd") assert.NoError(err) assert.Contains(out, "/var/www/html") _, _, err = app.Exec("db", "mysql", "-e", "DROP DATABASE db;") assert.NoError(err) _, _, err = app.Exec("db", "mysql", "information_schema", "-e", "CREATE DATABASE db;") assert.NoError(err) switch app.GetType() { case "drupal6": fallthrough case "drupal7": fallthrough case "drupal8": out, _, err = app.Exec("web", "drush", "status") assert.NoError(err) assert.Regexp("PHP configuration[ :]*/etc/php/[0-9].[0-9]/fpm/php.ini", out) case "wordpress": out, _, err = app.Exec("web", "wp", "--info") assert.NoError(err) assert.Regexp("/etc/php.*/php.ini", out) } err = app.Down(true, false) assert.NoError(err) runTime() switchDir() } } // TestDdevLogs tests the container log output functionality. func TestDdevLogs(t *testing.T) { assert := asrt.New(t) // Skip test because on Windows because the CaptureUserOut() hangs, at least // sometimes. if runtime.GOOS == "windows" { t.Skip("Skipping test TestDdevLogs on Windows") } app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevLogs", site.Name)) err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) stdout := testcommon.CaptureUserOut() err = app.Logs("web", false, false, "") assert.NoError(err) out := stdout() assert.Contains(out, "Server started") stdout = testcommon.CaptureUserOut() err = app.Logs("db", false, false, "") assert.NoError(err) out = stdout() assert.Contains(out, "MySQL init process done. Ready for start up.") // Test that we can get logs when project is stopped also err = app.Stop() assert.NoError(err) stdout = testcommon.CaptureUserOut() err = app.Logs("web", false, false, "") assert.NoError(err) out = stdout() assert.Contains(out, "Server started") stdout = testcommon.CaptureUserOut() err = app.Logs("db", false, false, "") assert.NoError(err) out = stdout() assert.Contains(out, "MySQL init process done. 
Ready for start up.") err = app.Down(true, false) assert.NoError(err) runTime() switchDir() } } // TestProcessHooks tests execution of commands defined in config.yaml func TestProcessHooks(t *testing.T) { assert := asrt.New(t) for _, site := range TestSites { cleanup := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s ProcessHooks", site.Name)) testcommon.ClearDockerEnv() app, err := ddevapp.NewApp(site.Dir, ddevapp.DefaultProviderName) assert.NoError(err) err = app.Start() assert.NoError(err) // Note that any ExecHost commands must be able to run on Windows. // echo and pwd are things that work pretty much the same in both places. app.Commands = map[string][]ddevapp.Command{ "hook-test": { { Exec: "ls /usr/local/bin/composer", }, { ExecHost: "echo something", }, }, } stdout := testcommon.CaptureUserOut() err = app.ProcessHooks("hook-test") assert.NoError(err) // Ignore color in putput, can be different in different OS's out := vtclean.Clean(stdout(), false) assert.Contains(out, "hook-test exec command succeeded, output below ---\n/usr/local/bin/composer") assert.Contains(out, "--- Running host command: echo something ---\nRunning Command Command=echo something\nsomething") err = app.Down(true, false) assert.NoError(err) runTime() cleanup() } } // TestDdevStop tests the functionality that is called when "ddev stop" is executed func TestDdevStop(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s DdevStop", site.Name)) testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) err = app.Stop() assert.NoError(err) for _, containerType := range [3]string{"web", "db", "dba"} { //nolint: vetshadow containerName, err := constructContainerName(containerType, app) assert.NoError(err) check, err := testcommon.ContainerCheck(containerName, "exited") assert.NoError(err) assert.True(check, containerType, "container has exited") } err = app.Down(true, false) assert.NoError(err) runTime() switchDir() } } // TestDdevStopMissingDirectory tests that the 'ddev stop' command works properly on sites with missing directories or ddev configs. func TestDdevStopMissingDirectory(t *testing.T) { assert := asrt.New(t) site := TestSites[0] testcommon.ClearDockerEnv() app := &ddevapp.DdevApp{} err := app.Init(site.Dir) assert.NoError(err) // Restart the site since it was stopped in the previous test. if app.SiteStatus() != ddevapp.SiteRunning { err = app.Start() assert.NoError(err) } tempPath := testcommon.CreateTmpDir("site-copy") siteCopyDest := filepath.Join(tempPath, "site") defer removeAllErrCheck(tempPath, assert) // Move the site directory to a temp location to mimic a missing directory. err = os.Rename(site.Dir, siteCopyDest) assert.NoError(err) err = app.Stop() assert.Error(err) assert.Contains(err.Error(), "If you would like to continue using ddev to manage this project please restore your files to that directory.") // Move the site directory back to its original location. err = os.Rename(siteCopyDest, site.Dir) assert.NoError(err) err = app.Down(true, false) assert.NoError(err) } // TestDescribe tests that the describe command works properly on a running // and also a stopped project. 
func TestDescribe(t *testing.T) { assert := asrt.New(t) app := &ddevapp.DdevApp{} for _, site := range TestSites { switchDir := site.Chdir() testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) // It may already be running, but start does no harm. err = app.Start() // If we have a problem starting, get the container logs and output. if err != nil { stdout := testcommon.CaptureUserOut() logsErr := app.Logs("web", false, false, "") assert.NoError(logsErr) out := stdout() healthcheck, inspectErr := exec.RunCommandPipe("bash", []string{"-c", fmt.Sprintf("docker inspect ddev-%s-web|jq -r '.[0].State.Health.Log[-1]'", app.Name)}) assert.NoError(inspectErr) assert.NoError(err, "app.Start(%s) failed: %v, \nweb container healthcheck='%s', \n=== web container logs=\n%s\n=== END web container logs ===", site.Name, err, healthcheck, out) } desc, err := app.Describe() assert.NoError(err) assert.EqualValues(ddevapp.SiteRunning, desc["status"], "") assert.EqualValues(app.GetName(), desc["name"]) assert.EqualValues(ddevapp.RenderHomeRootedDir(app.GetAppRoot()), desc["shortroot"]) assert.EqualValues(app.GetAppRoot(), desc["approot"]) assert.EqualValues(app.GetPhpVersion(), desc["php_version"]) // Now stop it and test behavior. err = app.Stop() assert.NoError(err) desc, err = app.Describe() assert.NoError(err) assert.EqualValues(ddevapp.SiteStopped, desc["status"]) err = app.Down(true, false) assert.NoError(err) switchDir() } } // TestDescribeMissingDirectory tests that the describe command works properly on sites with missing directories or ddev configs. func TestDescribeMissingDirectory(t *testing.T) { assert := asrt.New(t) site := TestSites[0] tempPath := testcommon.CreateTmpDir("site-copy") siteCopyDest := filepath.Join(tempPath, "site") defer removeAllErrCheck(tempPath, assert) app := &ddevapp.DdevApp{} err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) // Move the site directory to a temp location to mimick a missing directory. err = os.Rename(site.Dir, siteCopyDest) assert.NoError(err) desc, err := app.Describe() assert.NoError(err) assert.Contains(desc["status"], ddevapp.SiteDirMissing, "Status did not include the phrase '%s' when describing a site with missing directories.", ddevapp.SiteDirMissing) // Move the site directory back to its original location. err = os.Rename(siteCopyDest, site.Dir) assert.NoError(err) err = app.Down(true, false) assert.NoError(err) } // TestRouterPortsCheck makes sure that we can detect if the ports are available before starting the router. func TestRouterPortsCheck(t *testing.T) { assert := asrt.New(t) // First, stop any sites that might be running app := &ddevapp.DdevApp{} // Stop all sites, which should get the router out of there. for _, site := range TestSites { switchDir := site.Chdir() testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) if app.SiteStatus() == ddevapp.SiteRunning || app.SiteStatus() == ddevapp.SiteStopped { err = app.Down(true, false) assert.NoError(err) } switchDir() } // Now start one site, it's hard to get router to behave without one site. site := TestSites[0] testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) err = app.Start() assert.NoError(err) app, err = ddevapp.GetActiveApp(site.Name) if err != nil { t.Fatalf("Failed to GetActiveApp(%s), err:%v", site.Name, err) } err = app.Start() assert.NoError(err, "app.Start(%s) failed, err: %v", app.GetName(), err) // Stop the router using code from StopRouterIfNoContainers(). 
// StopRouterIfNoContainers can't be used here because it checks to see if containers are running // and doesn't do its job as a result. dest := ddevapp.RouterComposeYAMLPath() _, _, err = dockerutil.ComposeCmd([]string{dest}, "-p", ddevapp.RouterProjectName, "down", "-v") assert.NoError(err, "Failed to stop router using docker-compose, err=%v", err) // Occupy port 80 using docker busybox trick, then see if we can start router. // This is done with docker so that we don't have to use explicit sudo containerID, err := exec.RunCommand("sh", []string{"-c", "docker run -d -p80:80 --rm busybox:latest sleep 100 2>/dev/null"}) if err != nil { t.Fatalf("Failed to run docker command to occupy port 80, err=%v output=%v", err, containerID) } containerID = strings.TrimSpace(containerID) // Now try to start the router. It should fail because the port is occupied. err = ddevapp.StartDdevRouter() assert.Error(err, "Failure: router started even though port 80 was occupied") // Remove our dummy busybox docker container. out, err := exec.RunCommand("docker", []string{"rm", "-f", containerID}) assert.NoError(err, "Failed to docker rm the port-occupier container, err=%v output=%v", err, out) err = app.Down(true, false) assert.NoError(err) } // TestCleanupWithoutCompose ensures app containers can be properly cleaned up without a docker-compose config file present. func TestCleanupWithoutCompose(t *testing.T) { assert := asrt.New(t) // Skip test because we can't rename folders while they're in use if running on Windows. if runtime.GOOS == "windows" { t.Skip("Skipping test TestCleanupWithoutCompose on Windows") } site := TestSites[0] revertDir := site.Chdir() app := &ddevapp.DdevApp{} testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) // Ensure we have a site started so we have something to cleanup err = app.Start() assert.NoError(err) // Setup by creating temp directory and nesting a folder for our site. tempPath := testcommon.CreateTmpDir("site-copy") siteCopyDest := filepath.Join(tempPath, "site") defer removeAllErrCheck(tempPath, assert) // Move site directory to a temp directory to mimick a missing directory. err = os.Rename(site.Dir, siteCopyDest) assert.NoError(err) // Call the Down command() // Notice that we set the removeData parameter to true. // This gives us added test coverage over sites with missing directories // by ensuring any associated database files get cleaned up as well. err = app.Down(true, false) assert.NoError(err) for _, containerType := range [3]string{"web", "db", "dba"} { // nolint: vetshadow _, err := constructContainerName(containerType, app) assert.Error(err) } // Ensure there are no volumes associated with this project client := dockerutil.GetDockerClient() volumes, err := client.ListVolumes(docker.ListVolumesOptions{}) assert.NoError(err) for _, volume := range volumes { assert.False(volume.Labels["com.docker.compose.project"] == "ddev"+strings.ToLower(app.GetName())) } // Cleanup the global site database dirs. This does the work instead of running site.Cleanup() // because site.Cleanup() removes site directories that we'll need in other tests. dir := filepath.Join(util.GetGlobalDdevDir(), site.Name) err = os.RemoveAll(dir) assert.NoError(err) revertDir() // Move the site directory back to its original location. err = os.Rename(siteCopyDest, site.Dir) assert.NoError(err) } // TestGetappsEmpty ensures that GetApps returns an empty list when no applications are running. 
func TestGetAppsEmpty(t *testing.T) { assert := asrt.New(t) // Ensure test sites are removed for _, site := range TestSites { app := &ddevapp.DdevApp{} switchDir := site.Chdir() testcommon.ClearDockerEnv() err := app.Init(site.Dir) assert.NoError(err) if app.SiteStatus() != ddevapp.SiteNotFound { err = app.Down(true, false) assert.NoError(err) } switchDir() } apps := ddevapp.GetApps() assert.Equal(len(apps), 0, "Expected to find no apps but found %d apps=%v", len(apps), apps) } // TestRouterNotRunning ensures the router is shut down after all sites are stopped. func TestRouterNotRunning(t *testing.T) { assert := asrt.New(t) containers, err := dockerutil.GetDockerContainers(false) assert.NoError(err) for _, container := range containers { assert.NotEqual(dockerutil.ContainerName(container), "ddev-router", "ddev-router was not supposed to be running but it was") } } // TestListWithoutDir prevents regression where ddev list panics if one of the // sites found is missing a directory func TestListWithoutDir(t *testing.T) { // Set up tests and give ourselves a working directory. assert := asrt.New(t) testcommon.ClearDockerEnv() packageDir, _ := os.Getwd() // startCount is the count of apps at the start of this adventure apps := ddevapp.GetApps() startCount := len(apps) testDir := testcommon.CreateTmpDir("TestStartWithoutDdevConfig") defer testcommon.CleanupDir(testDir) err := os.MkdirAll(testDir+"/sites/default", 0777) assert.NoError(err) err = os.Chdir(testDir) assert.NoError(err) app, err := ddevapp.NewApp(testDir, ddevapp.DefaultProviderName) assert.NoError(err) app.Name = "junk" app.Type = "drupal7" err = app.WriteConfig() assert.NoError(err) // Do a start on the configured site. app, err = ddevapp.GetActiveApp("") assert.NoError(err) err = app.Start() assert.NoError(err) // Make sure we move out of the directory for Windows' sake garbageDir := testcommon.CreateTmpDir("RestingHere") defer testcommon.CleanupDir(garbageDir) err = os.Chdir(garbageDir) assert.NoError(err) testcommon.CleanupDir(testDir) apps = ddevapp.GetApps() assert.EqualValues(len(apps), startCount+1) // Make a whole table and make sure our app directory missing shows up. // This could be done otherwise, but we'd have to go find the site in the // array first. table := ddevapp.CreateAppTable() for _, site := range apps { // nolint: vetshadow desc, err := site.Describe() if err != nil { t.Fatalf("Failed to describe site %s: %v", site.GetName(), err) } ddevapp.RenderAppRow(table, desc) } // testDir on Windows has backslashes in it, resulting in invalid regexp // Remove them and use ., which is good enough. testDirSafe := strings.Replace(testDir, "\\", ".", -1) assert.Regexp(regexp.MustCompile("(?s)"+ddevapp.SiteDirMissing+".*"+testDirSafe), table.String()) err = app.Down(true, false) assert.NoError(err) // Change back to package dir. Lots of things will have to be cleaned up // in defers, and for windows we have to not be sitting in them. err = os.Chdir(packageDir) assert.NoError(err) } type URLRedirectExpectations struct { scheme string uri string expectedRedirectURI string } // TestHttpsRedirection tests to make sure that webserver and php redirect to correct // scheme (http or https). func TestHttpsRedirection(t *testing.T) { // Set up tests and give ourselves a working directory. 
assert := asrt.New(t) testcommon.ClearDockerEnv() packageDir, _ := os.Getwd() testDir := testcommon.CreateTmpDir("TestHttpsRedirection") defer testcommon.CleanupDir(testDir) appDir := filepath.Join(testDir, "proj") err := fileutil.CopyDir(filepath.Join(packageDir, "testdata", "TestHttpsRedirection"), appDir) assert.NoError(err) err = os.Chdir(appDir) assert.NoError(err) app, err := ddevapp.NewApp(appDir, ddevapp.DefaultProviderName) assert.NoError(err) app.Name = "proj" app.Type = "php" expectations := []URLRedirectExpectations{ {"https", "/subdir", "/subdir/"}, {"https", "/redir_abs.php", "/landed.php"}, {"https", "/redir_relative.php", "/landed.php"}, {"http", "/subdir", "/subdir/"}, {"http", "/redir_abs.php", "/landed.php"}, {"http", "/redir_relative.php", "/landed.php"}, } for _, webserverType := range []string{"nginx-fpm", "apache-fpm", "apache-cgi"} { app.WebserverType = webserverType err = app.WriteConfig() assert.NoError(err) // Do a start on the configured site. app, err = ddevapp.GetActiveApp("") assert.NoError(err) err = app.Start() assert.NoError(err) // Test for directory redirects under https and http for _, parts := range expectations { reqURL := parts.scheme + "://" + app.GetHostname() + parts.uri // nolint: vetshadow _, resp, err := testcommon.GetLocalHTTPResponse(t, reqURL) assert.Error(err) assert.NotNil(resp, "resp was nil for webserver_type=%s url=%s", webserverType, reqURL) if resp != nil { locHeader := resp.Header.Get("Location") expectedRedirect := parts.expectedRedirectURI // However, if we're hitting redir_abs.php (or apache hitting directory), the redirect will be the whole url. if strings.Contains(parts.uri, "redir_abs.php") || webserverType != "nginx-fpm" { expectedRedirect = parts.scheme + "://" + app.GetHostname() + parts.expectedRedirectURI } // Except the php relative redirect is always relative. if strings.Contains(parts.uri, "redir_relative.php") { expectedRedirect = parts.expectedRedirectURI } assert.EqualValues(locHeader, expectedRedirect, "For webserver_type %s url %s expected redirect %s != actual %s", webserverType, reqURL, expectedRedirect, locHeader) } } } err = app.Down(true, false) assert.NoError(err) // Change back to package dir. Lots of things will have to be cleaned up // in defers, and for windows we have to not be sitting in them. err = os.Chdir(packageDir) assert.NoError(err) } // TestMultipleComposeFiles checks to see if a set of docker-compose files gets // properly loaded in the right order, with docker-compose.yaml first and // with docker-compose.override.yaml last. func TestMultipleComposeFiles(t *testing.T) { // Set up tests and give ourselves a working directory. 
assert := asrt.New(t) // Make sure that valid yaml files get properly loaded in the proper order app, err := ddevapp.NewApp("./testdata/testMultipleComposeFiles", "") assert.NoError(err) files, err := app.ComposeFiles() assert.NoError(err) assert.True(files[0] == filepath.Join(app.AppConfDir(), "docker-compose.yaml")) assert.True(files[len(files)-1] == filepath.Join(app.AppConfDir(), "docker-compose.override.yaml")) // Make sure that some docker-compose.yml and docker-compose.yaml conflict gets noted properly app, err = ddevapp.NewApp("./testdata/testConflictingYamlYml", "") assert.NoError(err) _, err = app.ComposeFiles() assert.Error(err) if err != nil { assert.Contains(err.Error(), "there are more than one docker-compose.y*l") } // Make sure that some docker-compose.override.yml and docker-compose.override.yaml conflict gets noted properly app, err = ddevapp.NewApp("./testdata/testConflictingOverrideYaml", "") assert.NoError(err) _, err = app.ComposeFiles() assert.Error(err) if err != nil { assert.Contains(err.Error(), "there are more than one docker-compose.override.y*l") } // Make sure the error gets pointed out of there's no main docker-compose.yaml app, err = ddevapp.NewApp("./testdata/testNoDockerCompose", "") assert.NoError(err) _, err = app.ComposeFiles() assert.Error(err) if err != nil { assert.Contains(err.Error(), "failed to find a docker-compose.yml or docker-compose.yaml") } // Catch if we have no docker files at all. // This should also fail if the docker-compose.yaml.bak gets loaded. app, err = ddevapp.NewApp("./testdata/testNoDockerFilesAtAll", "") assert.NoError(err) _, err = app.ComposeFiles() assert.Error(err) if err != nil { assert.Contains(err.Error(), "failed to load any docker-compose.*y*l files") } } // TestGetAllURLs ensures the GetAllURLs function returns the expected number of URLs, // and that one of them is the direct web container address. func TestGetAllURLs(t *testing.T) { assert := asrt.New(t) for _, site := range TestSites { runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s GetAllURLs", site.Name)) testcommon.ClearDockerEnv() app := new(ddevapp.DdevApp) err := app.Init(site.Dir) assert.NoError(err) // Add some additional hostnames app.AdditionalHostnames = []string{ fmt.Sprintf("sub1.%s", site.Name), fmt.Sprintf("sub2.%s", site.Name), fmt.Sprintf("sub3.%s", site.Name), } err = app.WriteConfig() assert.NoError(err) err = app.Start() assert.NoError(err) urls := app.GetAllURLs() // Convert URLs to map[string]bool urlMap := make(map[string]bool) for _, u := range urls { urlMap[u] = true } // We expect two URLs for each hostname (http/https) and one direct web container address. expectedNumUrls := (2 * len(app.GetHostnames())) + 1 assert.Equal(len(urlMap), expectedNumUrls, "Unexpected number of URLs returned: %d", len(urlMap)) // Ensure urlMap contains direct address of the web container webContainer, err := app.FindContainerByType("web") assert.NoError(err) dockerIP, err := dockerutil.GetDockerIP() assert.NoError(err) // Find HTTP port of web container var port docker.APIPort for _, p := range webContainer.Ports { if p.PrivatePort == 80 { port = p break } } expectedDirectAddress := fmt.Sprintf("http://%s:%d", dockerIP, port.PublicPort) exists := urlMap[expectedDirectAddress] assert.True(exists, "URL list for app: %s does not contain direct web container address: %s", app.Name, expectedDirectAddress) // Multiple projects can't run at the same time with the fqdns, so we need to clean // up these for tests that run later. 
app.AdditionalFQDNs = []string{} app.AdditionalHostnames = []string{} err = app.WriteConfig() assert.NoError(err) err = app.Down(true, false) assert.NoError(err) runTime() } } // TestWebserverType checks that webserver_type:apache-cgi or apache-fpm does the right thing func TestWebserverType(t *testing.T) { assert := asrt.New(t) for _, site := range TestSites { runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s GetAllURLs", site.Name)) app := new(ddevapp.DdevApp) err := app.Init(site.Dir) assert.NoError(err) // Copy our phpinfo into the docroot of testsite. pwd, err := os.Getwd() assert.NoError(err) err = fileutil.CopyFile(filepath.Join(pwd, "testdata", "servertype.php"), filepath.Join(app.AppRoot, app.Docroot, "servertype.php")) assert.NoError(err) for _, app.WebserverType = range []string{"apache-fpm", "apache-cgi", "nginx-fpm"} { err = app.WriteConfig() assert.NoError(err) testcommon.ClearDockerEnv() err = app.Start() assert.NoError(err) // nolint: vetshadow out, resp, err := testcommon.GetLocalHTTPResponse(t, app.GetWebContainerDirectURL()+"/servertype.php") assert.NoError(err) expectedServerType := "Apache/2" if app.WebserverType == "nginx-fpm" { expectedServerType = "nginx" } assert.Contains(resp.Header["Server"][0], expectedServerType, "Server header for project=%s, app.WebserverType=%s should be %s", app.Name, app.WebserverType, expectedServerType) assert.Contains(out, expectedServerType, "For app.WebserverType=%s phpinfo expected servertype.php to show %s", app.WebserverType, expectedServerType) } // Set the apptype back to whatever the default was so we don't break any following tests. testVar := os.Getenv("DDEV_TEST_WEBSERVER_TYPE") if testVar != "" { app.WebserverType = testVar err = app.WriteConfig() assert.NoError(err) } err = app.Down(true, false) assert.NoError(err) runTime() } } // TestDbMigration tests migration from bind-mounted db to volume-mounted db // This should be important around the time of its release, 2018-08-02 or so, but should be increasingly // irrelevant after that and can eventually be removed. func TestDbMigration(t *testing.T) { assert := asrt.New(t) runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("TestDbMigration")) app := &ddevapp.DdevApp{} dbMigrationTarball, err := filepath.Abs(filepath.Join("testdata", "db_migration", "d7_to_migrate.tgz")) assert.NoError(err) // Use d7 only for this test site := FullTestSites[2] // If running this with GOTEST_SHORT we have to create the directory, tarball etc. if site.Dir == "" || !fileutil.FileExists(site.Dir) { err = site.Prepare() if err != nil { t.Fatalf("Prepare() failed on TestSite.Prepare() site=%s, err=%v", site.Name, err) } } switchDir := site.Chdir() testcommon.ClearDockerEnv() err = app.Init(site.Dir) assert.NoError(err) dataDir := filepath.Join(util.GetGlobalDdevDir(), app.Name, "mysql") // Remove any existing dataDir or migration backups if fileutil.FileExists(dataDir) { err = os.RemoveAll(dataDir) assert.NoError(err) } if fileutil.FileExists(dataDir + "_migrated.bak") { err = os.RemoveAll(dataDir + "_migrated.bak") assert.NoError(err) } // Untar the to-migrate db into old-style dataDir (~/.ddev/projectname/mysql) err = os.MkdirAll(dataDir, 0755) assert.NoError(err) err = archive.Untar(dbMigrationTarball, dataDir, "") assert.NoError(err) defer os.RemoveAll(dataDir) _, err = app.CreateSettingsFile() assert.NoError(err) // app.Start() will discover the mysql directory and migrate it to a snapshot. 
err = app.Start() assert.Error(err) assert.Contains(err.Error(), "it is not possible to migrate bind-mounted") runTime() switchDir() } // TestInternalAndExternalAccessToURL checks we can access content from host and from inside container by URL (with port) func TestInternalAndExternalAccessToURL(t *testing.T) { assert := asrt.New(t) for _, site := range TestSites { runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s TestInternalAndExternalAccessToURL", site.Name)) app := new(ddevapp.DdevApp) err := app.Init(site.Dir) assert.NoError(err) //nolint: vet for _, pair := range []testcommon.PortPair{{"80", "443"}, {"8080", "8443"}} { testcommon.ClearDockerEnv() app.RouterHTTPPort = pair.HTTPPort app.RouterHTTPSPort = pair.HTTPSPort err = app.WriteConfig() assert.NoError(err) if app.SiteStatus() == ddevapp.SiteStopped || app.SiteStatus() == ddevapp.SiteRunning { err = app.Down(true, false) assert.NoError(err) } err = app.Start() assert.NoError(err) // Ensure that we can access from the host even with extra port specifications. testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL()+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect) testcommon.EnsureLocalHTTPContent(t, app.GetHTTPSURL()+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect) // Ensure that we can access the same URL from within the web container (via router) var out string out, _, err = app.Exec("web", "curl", "-sk", app.GetHTTPURL()+site.Safe200URIWithExpectation.URI) assert.NoError(err) assert.Contains(out, site.Safe200URIWithExpectation.Expect) out, _, err = app.Exec("web", "curl", "-sk", app.GetHTTPSURL()+site.Safe200URIWithExpectation.URI) assert.NoError(err) assert.Contains(out, site.Safe200URIWithExpectation.Expect) } // Set the ports back to the default was so we don't break any following tests. app.RouterHTTPSPort = "443" app.RouterHTTPPort = "80" err = app.WriteConfig() assert.NoError(err) err = app.Down(true, false) assert.NoError(err) runTime() } } // constructContainerName builds a container name given the type (web/db/dba) and the app func constructContainerName(containerType string, app *ddevapp.DdevApp) (string, error) { container, err := app.FindContainerByType(containerType) if err != nil { return "", err } name := dockerutil.ContainerName(container) return name, nil } func removeAllErrCheck(path string, assert *asrt.Assertions) { err := os.RemoveAll(path) assert.NoError(err) }
1
12,767
This is odd because it's actually in our generated SiteSettingsPath, not in the SiteLocalSettingsPath (wp-config-ddev.php). Are these two files swapped somehow? I'd expect SiteSettingsPath to be wp-config.php and SiteLocalSettingsPath to be wp-config-ddev.php. BTW, I'm *way* ok with renaming that to SiteDdevSettingsPath.
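A minimal sketch of the expectation in the comment above, using hypothetical stand-in types and paths (the real DdevApp struct may be laid out differently); it only illustrates which file each settings field is expected to point at:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// wpApp is a hypothetical stand-in for the real app struct; only the two
// fields discussed in the review comment are modeled here.
type wpApp struct {
	SiteSettingsPath      string // expected: the CMS's own wp-config.php
	SiteLocalSettingsPath string // expected: the generated wp-config-ddev.php
}

// checkSettingsPaths returns an error when the two paths look swapped.
func checkSettingsPaths(a wpApp) error {
	if filepath.Base(a.SiteSettingsPath) != "wp-config.php" {
		return fmt.Errorf("SiteSettingsPath is %q, expected wp-config.php", a.SiteSettingsPath)
	}
	if filepath.Base(a.SiteLocalSettingsPath) != "wp-config-ddev.php" {
		return fmt.Errorf("SiteLocalSettingsPath is %q, expected wp-config-ddev.php", a.SiteLocalSettingsPath)
	}
	return nil
}

func main() {
	// The swapped arrangement the reviewer suspects would be flagged here.
	swapped := wpApp{
		SiteSettingsPath:      "/var/www/html/wp-config-ddev.php",
		SiteLocalSettingsPath: "/var/www/html/wp-config.php",
	}
	fmt.Println(checkSettingsPaths(swapped))
}
```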
drud-ddev
php
@@ -26,10 +26,13 @@ import ( "os/exec" "path/filepath" "strings" + + ecpb "kythe.io/kythe/proto/extraction_config_go_proto" ) // kytheConfigFileName The name of the Kythe extraction config const kytheExtractionConfigFile = ".kythe-extraction-config" +const defaultConfigDir = "kythe/go/extractors/config/default" // Repo is a container of input/output parameters for doing extraction on remote // repositories.
1
/* * Copyright 2018 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package config import ( "context" "fmt" "io" "io/ioutil" "log" "os" "os/exec" "path/filepath" "strings" ) // kytheConfigFileName The name of the Kythe extraction config const kytheExtractionConfigFile = ".kythe-extraction-config" // Repo is a container of input/output parameters for doing extraction on remote // repositories. type Repo struct { // Clone extracts a copy of the repo to the specified output Directory. Clone func(ctx context.Context, outputDir string) error // Where to write from an extraction. OutputPath string // An optional path to a file containing a // kythe.proto.ExtractionConfiguration encoded as JSON that details how // to perform extraction. ConfigPath string } // GitCopier returns a function that clones a repository via git command line. func GitCopier(repoURI string) func(ctx context.Context, outputDir string) error { return func(ctx context.Context, outputDir string) error { // TODO(danielmoy): strongly consider go-git instead of os.exec return exec.CommandContext(ctx, "git", "clone", repoURI, outputDir).Run() } } // LocalCopier returns a function that copies a local repository. // This function assumes the eventual output directory is already created. func LocalCopier(repoPath string) func(ctx context.Context, outputDir string) error { return func(ctx context.Context, outputDir string) error { gitDir := filepath.Join(repoPath, ".git") // TODO(danielmoy): consider extracting all or part of this // to a more common place. return filepath.Walk(repoPath, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if repoPath == path { // Intentionally do nothing for base dir. return nil } if filepath.HasPrefix(path, gitDir) { return filepath.SkipDir } rel, err := filepath.Rel(repoPath, path) if err != nil { return err } outPath := filepath.Join(outputDir, rel) if info.Mode().IsRegular() { if err := os.MkdirAll(filepath.Dir(outPath), 0755); err != nil { return fmt.Errorf("failed to make dir: %v", err) } inf, err := os.Open(path) if err != nil { return fmt.Errorf("failed to open input file from repo: %v", err) } defer inf.Close() of, err := os.Create(outPath) if err != nil { return fmt.Errorf("failed to open output file for repo copy: %v", err) } if _, err := io.Copy(of, inf); err != nil { of.Close() return fmt.Errorf("failed to copy repo file: %v", err) } return of.Close() } else if !info.IsDir() { // Notably in here are any links or other odd things. log.Printf("Unsupported file %s with mode %s\n", path, info.Mode()) } return nil }) } } // Extractor is the interface for handling kindex generation on repos. // // ExtractRepo takes an input repo, output path to a directory, and optional // kythe.proto.ExtractionConfiguration file path, and performs kythe extraction // on the repo, depositing results in the output directory path. type Extractor func(ctx context.Context, repo Repo) error // ExtractRepo extracts a given code repository and outputs kindex files. 
// // It makes a local clone of the repository. It optionally uses a passed // extraction config, otherwise it attempts to find a Kythe config named // ".kythe-extraction-config". // // It builds a one-off customized Docker image for extraction, and then runs it, // generating kindex files (format defined here: // http://kythe.io/docs/kythe-index-pack.html). // // This function requires both Git and Docker to be in $PATH during execution. func ExtractRepo(ctx context.Context, repo Repo) error { if err := verifyRequiredTools(); err != nil { return fmt.Errorf("ExtractRepo requires git and docker to be in $PATH: %v", err) } // create a temporary directory for the repo clone repoDir, err := ioutil.TempDir("", "repoDir") if err != nil { return fmt.Errorf("creating tmp repo dir: %v", err) } defer os.RemoveAll(repoDir) // create a temporary directory for the extraction output tmpOutDir, err := ioutil.TempDir("", "tmpOutDir") if err != nil { return fmt.Errorf("creating tmp out dir: %v", err) } defer os.RemoveAll(tmpOutDir) // copy the repo into our temp directory, so we can mutate its // build config without affecting the original source. if err := repo.Clone(ctx, repoDir); err != nil { return fmt.Errorf("copying repo: %v", err) } // if a config was passed as a arg, use the specified config if repo.ConfigPath == "" { // otherwise, use a Kythe config within the repo (if it exists) repo.ConfigPath = filepath.Join(repoDir, kytheExtractionConfigFile) } log.Printf("Using configuration file: %q\n", repo.ConfigPath) extractionDockerFile, err := ioutil.TempFile(tmpOutDir, "extractionDockerFile") if err != nil { return fmt.Errorf("creating tmp Dockerfile: %v", err) } // generate an extraction image from the config configFile, err := os.Open(repo.ConfigPath) if err != nil { return fmt.Errorf("opening config file: %v", err) } extractionConfig, err := Load(configFile) if err != nil { return fmt.Errorf("loading extraction config: %v", err) } err = CreateImage(extractionDockerFile.Name(), extractionConfig) if err != nil { return fmt.Errorf("creating extraction image: %v", err) } // use Docker to build the extraction image imageTag := strings.ToLower(filepath.Base(extractionDockerFile.Name())) output, err := exec.CommandContext(ctx, "docker", "build", "-f", extractionDockerFile.Name(), "-t", imageTag, tmpOutDir).CombinedOutput() defer mustCleanUpImage(ctx, imageTag) if err != nil { return fmt.Errorf("building docker image: %v\nCommand output %s", err, string(output)) } // run the extraction output, err = exec.CommandContext(ctx, "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", repoDir, DefaultRepoVolume), "-v", fmt.Sprintf("%s:%s", repo.OutputPath, DefaultOutputVolume), "-t", imageTag).CombinedOutput() if err != nil { return fmt.Errorf("extracting repo: %v\nCommand output: %s", err, string(output)) } return nil } func verifyRequiredTools() error { if _, err := exec.LookPath("git"); err != nil { return err } if _, err := exec.LookPath("docker"); err != nil { return err } return nil } func mustCleanUpImage(ctx context.Context, tmpImageTag string) { cmd := exec.CommandContext(ctx, "docker", "image", "rm", tmpImageTag) err := cmd.Run() if err != nil { log.Printf("Failed to clean up docker image: %v", err) } }
1
8,451
It seems like this must necessarily be a stopgap. Can you please add a TODO(#xyz) to point to the appropriate issue?
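For what it's worth, the requested marker on the new constant from the diff above might look like the sketch below; the `#xyz` issue number is kept as the reviewer's placeholder, since no concrete issue is named here:

```go
package config

// TODO(#xyz): defaultConfigDir is a stopgap location for built-in extraction
// configs; replace #xyz with the tracking issue number and revisit this
// default once that issue is resolved.
const defaultConfigDir = "kythe/go/extractors/config/default"
```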
kythe-kythe
go
@@ -29,12 +29,15 @@ func Agent(config *config.Agent) error { defer logs.FlushLogs() startKubelet(config) - startKubeProxy(config) + + if !config.DisableKubeProxy { + return startKubeProxy(config) + } return nil } -func startKubeProxy(cfg *config.Agent) { +func startKubeProxy(cfg *config.Agent) error { argsMap := map[string]string{ "proxy-mode": "iptables", "healthz-bind-address": "127.0.0.1",
1
package agent import ( "bufio" "context" "math/rand" "os" "path/filepath" "strings" "time" "github.com/opencontainers/runc/libcontainer/system" "github.com/rancher/k3s/pkg/daemons/config" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/net" "k8s.io/component-base/logs" proxy "k8s.io/kubernetes/cmd/kube-proxy/app" kubelet "k8s.io/kubernetes/cmd/kubelet/app" "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" _ "k8s.io/component-base/metrics/prometheus/restclient" // for client metric registration _ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration ) func Agent(config *config.Agent) error { rand.Seed(time.Now().UTC().UnixNano()) logs.InitLogs() defer logs.FlushLogs() startKubelet(config) startKubeProxy(config) return nil } func startKubeProxy(cfg *config.Agent) { argsMap := map[string]string{ "proxy-mode": "iptables", "healthz-bind-address": "127.0.0.1", "kubeconfig": cfg.KubeConfigKubeProxy, "cluster-cidr": cfg.ClusterCIDR.String(), } if cfg.NodeName != "" { argsMap["hostname-override"] = cfg.NodeName } args := config.GetArgsList(argsMap, cfg.ExtraKubeProxyArgs) command := proxy.NewProxyCommand() command.SetArgs(args) go func() { logrus.Infof("Running kube-proxy %s", config.ArgString(args)) logrus.Fatalf("kube-proxy exited: %v", command.Execute()) }() } func startKubelet(cfg *config.Agent) { argsMap := map[string]string{ "healthz-bind-address": "127.0.0.1", "read-only-port": "0", "cluster-domain": cfg.ClusterDomain, "kubeconfig": cfg.KubeConfigKubelet, "eviction-hard": "imagefs.available<5%,nodefs.available<5%", "eviction-minimum-reclaim": "imagefs.available=10%,nodefs.available=10%", "fail-swap-on": "false", //"cgroup-root": "/k3s", "cgroup-driver": "cgroupfs", "authentication-token-webhook": "true", "anonymous-auth": "false", "authorization-mode": modes.ModeWebhook, } if cfg.RootDir != "" { argsMap["root-dir"] = cfg.RootDir argsMap["cert-dir"] = filepath.Join(cfg.RootDir, "pki") argsMap["seccomp-profile-root"] = filepath.Join(cfg.RootDir, "seccomp") } if cfg.CNIConfDir != "" { argsMap["cni-conf-dir"] = cfg.CNIConfDir } if cfg.CNIBinDir != "" { argsMap["cni-bin-dir"] = cfg.CNIBinDir } if cfg.CNIPlugin { argsMap["network-plugin"] = "cni" } if len(cfg.ClusterDNS) > 0 { argsMap["cluster-dns"] = cfg.ClusterDNS.String() } if cfg.ResolvConf != "" { argsMap["resolv-conf"] = cfg.ResolvConf } if cfg.RuntimeSocket != "" { argsMap["container-runtime"] = "remote" argsMap["container-runtime-endpoint"] = cfg.RuntimeSocket argsMap["containerd"] = cfg.RuntimeSocket argsMap["serialize-image-pulls"] = "false" } else if cfg.PauseImage != "" { argsMap["pod-infra-container-image"] = cfg.PauseImage } if cfg.ListenAddress != "" { argsMap["address"] = cfg.ListenAddress } if cfg.ClientCA != "" { argsMap["anonymous-auth"] = "false" argsMap["client-ca-file"] = cfg.ClientCA } if cfg.ServingKubeletCert != "" && cfg.ServingKubeletKey != "" { argsMap["tls-cert-file"] = cfg.ServingKubeletCert argsMap["tls-private-key-file"] = cfg.ServingKubeletKey } if cfg.NodeName != "" { argsMap["hostname-override"] = cfg.NodeName } defaultIP, err := net.ChooseHostInterface() if err != nil || defaultIP.String() != cfg.NodeIP { argsMap["node-ip"] = cfg.NodeIP } root, hasCFS, hasPIDs := checkCgroups() if !hasCFS { logrus.Warn("Disabling CPU quotas due to missing cpu.cfs_period_us") argsMap["cpu-cfs-quota"] = "false" } if !hasPIDs { logrus.Warn("Disabling pod PIDs limit feature due to missing cgroup pids support") argsMap["cgroups-per-qos"] = "false" argsMap["enforce-node-allocatable"] = 
"" argsMap["feature-gates"] = addFeatureGate(argsMap["feature-gates"], "SupportPodPidsLimit=false") } if root != "" { argsMap["runtime-cgroups"] = root argsMap["kubelet-cgroups"] = root } if system.RunningInUserNS() { argsMap["feature-gates"] = addFeatureGate(argsMap["feature-gates"], "DevicePlugins=false") } argsMap["node-labels"] = strings.Join(cfg.NodeLabels, ",") if len(cfg.NodeTaints) > 0 { argsMap["register-with-taints"] = strings.Join(cfg.NodeTaints, ",") } if !cfg.DisableCCM { argsMap["cloud-provider"] = "external" } if cfg.Rootless { // flags are from https://github.com/rootless-containers/usernetes/blob/v20190826.0/boot/kubelet.sh argsMap["cgroup-driver"] = "none" argsMap["feature-gates=SupportNoneCgroupDriver"] = "true" argsMap["cgroups-per-qos"] = "false" argsMap["enforce-node-allocatable"] = "" } args := config.GetArgsList(argsMap, cfg.ExtraKubeletArgs) command := kubelet.NewKubeletCommand(context.Background().Done()) command.SetArgs(args) go func() { logrus.Infof("Running kubelet %s", config.ArgString(args)) logrus.Fatalf("kubelet exited: %v", command.Execute()) }() } func addFeatureGate(current, new string) string { if current == "" { return new } return current + "," + new } func checkCgroups() (root string, hasCFS bool, hasPIDs bool) { f, err := os.Open("/proc/self/cgroup") if err != nil { return "", false, false } defer f.Close() scan := bufio.NewScanner(f) for scan.Scan() { parts := strings.Split(scan.Text(), ":") if len(parts) < 3 { continue } systems := strings.Split(parts[1], ",") for _, system := range systems { if system == "pids" { hasPIDs = true } else if system == "cpu" { p := filepath.Join("/sys/fs/cgroup", parts[1], parts[2], "cpu.cfs_period_us") if _, err := os.Stat(p); err == nil { hasCFS = true } } else if system == "name=systemd" { last := parts[len(parts)-1] i := strings.LastIndex(last, ".slice") if i > 0 { root = "/systemd" + last[:i+len(".slice")] } else { root = "/systemd" } } } } return root, hasCFS, hasPIDs }
1
8,037
Is the return signature necessary, since we never actually return if there is a problem (I'm referring to the call to `logrus.Fatalf`)? Let's pick a pattern and stick with it.
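To make the two patterns concrete, here is a minimal, hypothetical sketch (names invented, not the k3s code) contrasting them: a starter that returns setup errors to its caller versus one that calls a fatal logger itself, in which case a returned error could never be observed:

```go
package main

import (
	"errors"
	"log"
)

func validate() error { return nil } // stand-in for argument/config checks

// startReturningError reports setup problems to the caller, who decides
// whether to abort; only the long-running goroutine is fatal.
func startReturningError(run func() error) error {
	if err := validate(); err != nil {
		return err
	}
	go func() { log.Fatalf("component exited: %v", run()) }()
	return nil
}

// startFatal keeps the bare signature: any failure ends the process right
// here, so there is nothing meaningful left to return.
func startFatal(run func() error) {
	if err := validate(); err != nil {
		log.Fatalf("setup failed: %v", err) // never returns
	}
	go func() { log.Fatalf("component exited: %v", run()) }()
}

func main() {
	if err := startReturningError(func() error { return errors.New("stopped") }); err != nil {
		log.Printf("start failed: %v", err)
	}
	startFatal(func() error { return errors.New("stopped") })
	select {} // block so the goroutines get a chance to run
}
```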
k3s-io-k3s
go
@@ -5,6 +5,8 @@ export default Ember.Controller.extend({ barcodeUri: function() { let id = this.get('model.id'); let name = this.get('model.name'); + + /* eslint new-cap: ['error', { 'capIsNew': false }] */ return Ember.$(document).JsBarcode(id, { width: 1, height: 20,
1
import Ember from 'ember'; export default Ember.Controller.extend({ selectedPrinter: null, barcodeUri: function() { let id = this.get('model.id'); let name = this.get('model.name'); return Ember.$(document).JsBarcode(id, { width: 1, height: 20, fontSize: 10, displayValue: name, returnUri: true }); }.property('model.id', 'model.name'), printers: function() { return dymo.label.framework.getTapePrinters(); }.property(), havePrinters: function() { let printers = this.get('printers'); if (printers.length > 0) { return true; } else { return false; } }.property('printers'), singlePrinter: function() { let printers = this.get('printers'); if (printers.length === 1) { return true; } else { return false; } }.property('printers'), actions: { print: function() { let barcodeUri = this.get('barcodeUri'); let selectedPrinter = this.get('selectedPrinter'); if (!selectedPrinter) { selectedPrinter = this.get('printers')[0].name; } Ember.$.get('/dymo/BarcodeAsImage.label', function(labelXml) { let barcodeAsImageLabel = dymo.label.framework.openLabelXml(labelXml); let pngBase64 = barcodeUri.substr('data:image/png;base64,'.length); barcodeAsImageLabel.setObjectText('Image', pngBase64); barcodeAsImageLabel.print(selectedPrinter); }, 'text'); } } });
1
13,217
@btecu why is this override needed here? I'm not seeing a `new` being used.
HospitalRun-hospitalrun-frontend
js
@@ -2527,7 +2527,11 @@ func (s *Server) leafNodeFinishConnectProcess(c *client) { c.mu.Unlock() // Make sure we register with the account here. - c.registerWithAccount(acc) + if err := c.registerWithAccount(acc); err != nil { + c.Errorf("Registering leaf with account %s resulted in error: %v", acc.Name, err) + c.closeConnection(ProtocolViolation) + return + } s.addLeafNodeConnection(c, _EMPTY_, _EMPTY_, false) s.initLeafNodeSmapAndSendSubs(c) if sendSysConnectEvent {
1
// Copyright 2019-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "bufio" "bytes" "crypto/tls" "encoding/base64" "encoding/json" "fmt" "io/ioutil" "math/rand" "net" "net/http" "net/url" "reflect" "regexp" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/nats-io/nkeys" "github.com/nats-io/nuid" ) // Warning when user configures leafnode TLS insecure const leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!" // When a loop is detected, delay the reconnect of solicited connection. const leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second // When a server receives a message causing a permission violation, the // connection is closed and it won't attempt to reconnect for that long. const leafNodeReconnectAfterPermViolation = 30 * time.Second // Prefix for loop detection subject const leafNodeLoopDetectionSubjectPrefix = "$LDS." // Path added to URL to indicate to WS server that the connection is a // LEAF connection as opposed to a CLIENT. const leafNodeWSPath = "/leafnode" type leaf struct { // We have any auth stuff here for solicited connections. remote *leafNodeCfg // isSpoke tells us what role we are playing. // Used when we receive a connection but otherside tells us they are a hub. isSpoke bool // remoteCluster is when we are a hub but the spoke leafnode is part of a cluster. remoteCluster string // remoteServer holds onto the remove server's name or ID. remoteServer string // Used to suppress sub and unsub interest. Same as routes but our audience // here is tied to this leaf node. This will hold all subscriptions except this // leaf nodes. This represents all the interest we want to send to the other side. smap map[string]int32 // This map will contain all the subscriptions that have been added to the smap // during initLeafNodeSmapAndSendSubs. It is short lived and is there to avoid // race between processing of a sub where sub is added to account sublist but // updateSmap has not be called on that "thread", while in the LN readloop, // when processing CONNECT, initLeafNodeSmapAndSendSubs is invoked and add // this subscription to smap. When processing of the sub then calls updateSmap, // we would add it a second time in the smap causing later unsub to suppress the LS-. tsub map[*subscription]struct{} tsubt *time.Timer } // Used for remote (solicited) leafnodes. type leafNodeCfg struct { sync.RWMutex *RemoteLeafOpts urls []*url.URL curURL *url.URL tlsName string username string password string perms *Permissions connDelay time.Duration // Delay before a connect, could be used while detecting loop condition, etc.. } // Check to see if this is a solicited leafnode. We do special processing for solicited. 
func (c *client) isSolicitedLeafNode() bool { return c.kind == LEAF && c.leaf.remote != nil } // Returns true if this is a solicited leafnode and is not configured to be treated as a hub or a receiving // connection leafnode where the otherside has declared itself to be the hub. func (c *client) isSpokeLeafNode() bool { return c.kind == LEAF && c.leaf.isSpoke } func (c *client) isHubLeafNode() bool { return c.kind == LEAF && !c.leaf.isSpoke } // Will add in the deny exports and imports for JetStream on solicited connections if we // are sharing the system account and wanting to extend the JS domain. // r lock should be held. func (s *Server) addInJSDeny(r *leafNodeCfg) { s.addInJSDenyExport(r) s.addInJSDenyImport(r) } // Will add in the deny export for JetStream on solicited connections if we // detect we have multiple JetStream domains and we know our local account // is JetStream enabled. // r lock should be held. func (s *Server) addInJSDenyExport(r *leafNodeCfg) { for _, dsubj := range r.DenyExports { if dsubj == jsAllAPI { return } } s.Noticef("Adding deny export of %q for leafnode configuration on %q that bridges system account", jsAllAPI, r.LocalAccount) r.DenyExports = append(r.DenyExports, jsAllAPI) // We added in some deny clauses here so need to regenerate the permissions etc. perms := &Permissions{} perms.Publish = &SubjectPermission{Deny: r.DenyExports} if len(r.DenyImports) > 0 { perms.Subscribe = &SubjectPermission{Deny: r.DenyImports} } r.perms = perms } // Will add in the deny import for JetStream on solicited connections if we // detect we have multiple JetStream domains and we know our local account // is JetStream enabled. // r lock should be held. func (s *Server) addInJSDenyImport(r *leafNodeCfg) { for _, dsubj := range r.DenyImports { if dsubj == jsAllAPI { return } } s.Noticef("Adding deny import of %q for leafnode configuration on %q that bridges system account", jsAllAPI, r.LocalAccount) r.DenyImports = append(r.DenyImports, jsAllAPI) // We added in some deny clauses here so need to regenerate the permissions etc. perms := &Permissions{} perms.Subscribe = &SubjectPermission{Deny: r.DenyImports} if len(r.DenyExports) > 0 { perms.Publish = &SubjectPermission{Deny: r.DenyExports} } r.perms = perms } // Used for $SYS accounts when sharing but using separate JS domains. // r lock should be held. func (s *Server) addInJSDenyAll(r *leafNodeCfg) { denyAll := []string{jscAllSubj, raftAllSubj, jsAllAPI} s.Noticef("Sharing system account but utilizing separate JetStream Domains") s.Noticef("Adding deny of %+v for leafnode configuration that bridges system account", denyAll) hasDeny := func(deny string, l []string) bool { for _, le := range l { if le == deny { return true } } return false } var exportAdded, importAdded bool for _, deny := range denyAll { if !hasDeny(deny, r.DenyExports) { r.DenyExports = append(r.DenyExports, deny) exportAdded = true } if !hasDeny(deny, r.DenyImports) { r.DenyImports = append(r.DenyImports, deny) importAdded = true } } if !exportAdded && !importAdded { return } perms := &Permissions{} if exportAdded { perms.Publish = &SubjectPermission{Deny: r.DenyExports} } if importAdded { perms.Subscribe = &SubjectPermission{Deny: r.DenyImports} } r.perms = perms } // Determine if we are sharing our local system account with the remote. 
func (s *Server) hasSystemRemoteLeaf() bool { s.mu.Lock() defer s.mu.Unlock() return s.hasSystemRemoteLeafLocked() != nil } func (s *Server) hasSystemRemoteLeafLocked() *leafNodeCfg { if s.sys == nil { return nil } sacc := s.sys.account.Name for _, r := range s.leafRemoteCfgs { r.RLock() lacc := r.LocalAccount r.RUnlock() if lacc == sacc { return r } } return nil } // This will spin up go routines to solicit the remote leaf node connections. func (s *Server) solicitLeafNodeRemotes(remotes []*RemoteLeafOpts) { for _, r := range remotes { s.mu.Lock() remote := newLeafNodeCfg(r) s.leafRemoteCfgs = append(s.leafRemoteCfgs, remote) s.mu.Unlock() s.startGoRoutine(func() { s.connectToRemoteLeafNode(remote, true) }) } } func (s *Server) remoteLeafNodeStillValid(remote *leafNodeCfg) bool { for _, ri := range s.getOpts().LeafNode.Remotes { // FIXME(dlc) - What about auth changes? if reflect.DeepEqual(ri.URLs, remote.URLs) { return true } } return false } // Ensure that leafnode is properly configured. func validateLeafNode(o *Options) error { if err := validateLeafNodeAuthOptions(o); err != nil { return err } // In local config mode, check that leafnode configuration refers to accounts that exist. if len(o.TrustedOperators) == 0 { accNames := map[string]struct{}{} for _, a := range o.Accounts { accNames[a.Name] = struct{}{} } // global account is always created accNames[DEFAULT_GLOBAL_ACCOUNT] = struct{}{} // in the context of leaf nodes, empty account means global account accNames[_EMPTY_] = struct{}{} // system account either exists or, if not disabled, will be created if o.SystemAccount == _EMPTY_ && !o.NoSystemAccount { accNames[DEFAULT_SYSTEM_ACCOUNT] = struct{}{} } checkAccountExists := func(accName string, cfgType string) error { if _, ok := accNames[accName]; !ok { return fmt.Errorf("cannot find local account %q specified in leafnode %s", accName, cfgType) } return nil } if err := checkAccountExists(o.LeafNode.Account, "authorization"); err != nil { return err } for _, lu := range o.LeafNode.Users { if lu.Account == nil { // means global account continue } if err := checkAccountExists(lu.Account.Name, "authorization"); err != nil { return err } } for _, r := range o.LeafNode.Remotes { if err := checkAccountExists(r.LocalAccount, "remote"); err != nil { return err } } } else { if len(o.LeafNode.Users) != 0 { return fmt.Errorf("operator mode does not allow specifying user in leafnode config") } for _, r := range o.LeafNode.Remotes { if !nkeys.IsValidPublicAccountKey(r.LocalAccount) { return fmt.Errorf( "operator mode requires account nkeys in remotes. " + "Please add an `account` key to each remote in your `leafnodes` section, to assign it to an account. " + "Each account value should be a 56 character public key, starting with the letter 'A'") } } if o.LeafNode.Port != 0 && o.LeafNode.Account != "" && !nkeys.IsValidPublicAccountKey(o.LeafNode.Account) { return fmt.Errorf("operator mode and non account nkeys are incompatible") } } // If a remote has a websocket scheme, all need to have it. 
for _, rcfg := range o.LeafNode.Remotes { if len(rcfg.URLs) >= 2 { firstIsWS, ok := isWSURL(rcfg.URLs[0]), true for i := 1; i < len(rcfg.URLs); i++ { u := rcfg.URLs[i] if isWS := isWSURL(u); isWS && !firstIsWS || !isWS && firstIsWS { ok = false break } } if !ok { return fmt.Errorf("remote leaf node configuration cannot have a mix of websocket and non-websocket urls: %q", redactURLList(rcfg.URLs)) } } } if o.LeafNode.Port == 0 { return nil } if o.Gateway.Name == "" && o.Gateway.Port == 0 { return nil } // If we are here we have both leaf nodes and gateways defined, make sure there // is a system account defined. if o.SystemAccount == "" { return fmt.Errorf("leaf nodes and gateways (both being defined) require a system account to also be configured") } if err := validatePinnedCerts(o.LeafNode.TLSPinnedCerts); err != nil { return fmt.Errorf("leafnode: %v", err) } return nil } // Used to validate user names in LeafNode configuration. // - rejects mix of single and multiple users. // - rejects duplicate user names. func validateLeafNodeAuthOptions(o *Options) error { if len(o.LeafNode.Users) == 0 { return nil } if o.LeafNode.Username != _EMPTY_ { return fmt.Errorf("can not have a single user/pass and a users array") } users := map[string]struct{}{} for _, u := range o.LeafNode.Users { if _, exists := users[u.Username]; exists { return fmt.Errorf("duplicate user %q detected in leafnode authorization", u.Username) } users[u.Username] = struct{}{} } return nil } // Update remote LeafNode TLS configurations after a config reload. func (s *Server) updateRemoteLeafNodesTLSConfig(opts *Options) { max := len(opts.LeafNode.Remotes) if max == 0 { return } s.mu.Lock() defer s.mu.Unlock() // Changes in the list of remote leaf nodes is not supported. // However, make sure that we don't go over the arrays. if len(s.leafRemoteCfgs) < max { max = len(s.leafRemoteCfgs) } for i := 0; i < max; i++ { ro := opts.LeafNode.Remotes[i] cfg := s.leafRemoteCfgs[i] if ro.TLSConfig != nil { cfg.Lock() cfg.TLSConfig = ro.TLSConfig.Clone() cfg.Unlock() } } } func (s *Server) reConnectToRemoteLeafNode(remote *leafNodeCfg) { delay := s.getOpts().LeafNode.ReconnectInterval select { case <-time.After(delay): case <-s.quitCh: s.grWG.Done() return } s.connectToRemoteLeafNode(remote, false) } // Creates a leafNodeCfg object that wraps the RemoteLeafOpts. func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg { cfg := &leafNodeCfg{ RemoteLeafOpts: remote, urls: make([]*url.URL, 0, len(remote.URLs)), } if len(remote.DenyExports) > 0 || len(remote.DenyImports) > 0 { perms := &Permissions{} if len(remote.DenyExports) > 0 { perms.Publish = &SubjectPermission{Deny: remote.DenyExports} } if len(remote.DenyImports) > 0 { perms.Subscribe = &SubjectPermission{Deny: remote.DenyImports} } cfg.perms = perms } // Start with the one that is configured. We will add to this // array when receiving async leafnode INFOs. cfg.urls = append(cfg.urls, cfg.URLs...) // If allowed to randomize, do it on our copy of URLs if !remote.NoRandomize { rand.Shuffle(len(cfg.urls), func(i, j int) { cfg.urls[i], cfg.urls[j] = cfg.urls[j], cfg.urls[i] }) } // If we are TLS make sure we save off a proper servername if possible. // Do same for user/password since we may need them to connect to // a bare URL that we get from INFO protocol. for _, u := range cfg.urls { cfg.saveTLSHostname(u) cfg.saveUserPassword(u) // If the url(s) have the "wss://" scheme, and we don't have a TLS // config, mark that we should be using TLS anyway. 
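// e.g. a remote configured only as urls: ["wss://hub.example.com:443"]
// (hypothetical) will perform the TLS client handshake even without an
// explicit tls{} block.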
if !cfg.TLS && isWSSURL(u) { cfg.TLS = true } } return cfg } // Will pick an URL from the list of available URLs. func (cfg *leafNodeCfg) pickNextURL() *url.URL { cfg.Lock() defer cfg.Unlock() // If the current URL is the first in the list and we have more than // one URL, then move that one to end of the list. if cfg.curURL != nil && len(cfg.urls) > 1 && urlsAreEqual(cfg.curURL, cfg.urls[0]) { first := cfg.urls[0] copy(cfg.urls, cfg.urls[1:]) cfg.urls[len(cfg.urls)-1] = first } cfg.curURL = cfg.urls[0] return cfg.curURL } // Returns the current URL func (cfg *leafNodeCfg) getCurrentURL() *url.URL { cfg.RLock() defer cfg.RUnlock() return cfg.curURL } // Returns how long the server should wait before attempting // to solicit a remote leafnode connection. func (cfg *leafNodeCfg) getConnectDelay() time.Duration { cfg.RLock() delay := cfg.connDelay cfg.RUnlock() return delay } // Sets the connect delay. func (cfg *leafNodeCfg) setConnectDelay(delay time.Duration) { cfg.Lock() cfg.connDelay = delay cfg.Unlock() } // Ensure that non-exported options (used in tests) have // been properly set. func (s *Server) setLeafNodeNonExportedOptions() { opts := s.getOpts() s.leafNodeOpts.dialTimeout = opts.LeafNode.dialTimeout if s.leafNodeOpts.dialTimeout == 0 { // Use same timeouts as routes for now. s.leafNodeOpts.dialTimeout = DEFAULT_ROUTE_DIAL } s.leafNodeOpts.resolver = opts.LeafNode.resolver if s.leafNodeOpts.resolver == nil { s.leafNodeOpts.resolver = net.DefaultResolver } } const sharedSysAccDelay = 250 * time.Millisecond func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) { defer s.grWG.Done() if remote == nil || len(remote.URLs) == 0 { s.Debugf("Empty remote leafnode definition, nothing to connect") return } opts := s.getOpts() reconnectDelay := opts.LeafNode.ReconnectInterval s.mu.Lock() dialTimeout := s.leafNodeOpts.dialTimeout resolver := s.leafNodeOpts.resolver var isSysAcc bool if s.eventsEnabled() { isSysAcc = remote.LocalAccount == s.sys.account.Name } s.mu.Unlock() // If we are sharing a system account and we are not standalone delay to gather some info prior. if firstConnect && isSysAcc && !s.standAloneMode() { s.Debugf("Will delay first leafnode connect to shared system account due to clustering") remote.setConnectDelay(sharedSysAccDelay) } if connDelay := remote.getConnectDelay(); connDelay > 0 { select { case <-time.After(connDelay): case <-s.quitCh: return } remote.setConnectDelay(0) } var conn net.Conn const connErrFmt = "Error trying to connect as leafnode to remote server %q (attempt %v): %v" attempts := 0 for s.isRunning() && s.remoteLeafNodeStillValid(remote) { rURL := remote.pickNextURL() url, err := s.getRandomIP(resolver, rURL.Host, nil) if err == nil { var ipStr string if url != rURL.Host { ipStr = fmt.Sprintf(" (%s)", url) } s.Debugf("Trying to connect as leafnode to remote server on %q%s", rURL.Host, ipStr) conn, err = natsDialTimeout("tcp", url, dialTimeout) } if err != nil { attempts++ if s.shouldReportConnectErr(firstConnect, attempts) { s.Errorf(connErrFmt, rURL.Host, attempts, err) } else { s.Debugf(connErrFmt, rURL.Host, attempts, err) } select { case <-s.quitCh: return case <-time.After(reconnectDelay): continue } } if !s.remoteLeafNodeStillValid(remote) { conn.Close() return } // We have a connection here to a remote server. // Go ahead and create our leaf node and return. s.createLeafNode(conn, rURL, remote, nil) return } } // Save off the tlsName for when we use TLS and mix hostnames and IPs. 
IPs usually // come from the server we connect to. // // We used to save the name only if there was a TLSConfig or scheme equal to "tls". // However, this was causing failures for users that did not set the scheme (and // their remote connections did not have a tls{} block). // We now save the host name regardless in case the remote returns an INFO indicating // that TLS is required. func (cfg *leafNodeCfg) saveTLSHostname(u *url.URL) { if cfg.tlsName == _EMPTY_ && net.ParseIP(u.Hostname()) == nil { cfg.tlsName = u.Hostname() } } // Save off the username/password for when we connect using a bare URL // that we get from the INFO protocol. func (cfg *leafNodeCfg) saveUserPassword(u *url.URL) { if cfg.username == _EMPTY_ && u.User != nil { cfg.username = u.User.Username() cfg.password, _ = u.User.Password() } } // This starts the leafnode accept loop in a go routine, unless it // is detected that the server has already been shutdown. func (s *Server) startLeafNodeAcceptLoop() { // Snapshot server options. opts := s.getOpts() port := opts.LeafNode.Port if port == -1 { port = 0 } s.mu.Lock() if s.shutdown { s.mu.Unlock() return } hp := net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(port)) l, e := natsListen("tcp", hp) s.leafNodeListenerErr = e if e != nil { s.mu.Unlock() s.Fatalf("Error listening on leafnode port: %d - %v", opts.LeafNode.Port, e) return } s.Noticef("Listening for leafnode connections on %s", net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port))) tlsRequired := opts.LeafNode.TLSConfig != nil tlsVerify := tlsRequired && opts.LeafNode.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert info := Info{ ID: s.info.ID, Name: s.info.Name, Version: s.info.Version, GitCommit: gitCommit, GoVersion: runtime.Version(), AuthRequired: true, TLSRequired: tlsRequired, TLSVerify: tlsVerify, MaxPayload: s.info.MaxPayload, // TODO(dlc) - Allow override? Headers: s.supportsHeaders(), JetStream: opts.JetStream, Domain: opts.JetStreamDomain, Proto: 1, // Fixed for now. InfoOnConnect: true, } // If we have selected a random port... if port == 0 { // Write resolved port back to options. opts.LeafNode.Port = l.Addr().(*net.TCPAddr).Port } s.leafNodeInfo = info // Possibly override Host/Port and set IP based on Cluster.Advertise if err := s.setLeafNodeInfoHostPortAndIP(); err != nil { s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", s.opts.LeafNode.Advertise, err) l.Close() s.mu.Unlock() return } s.leafURLsMap[s.leafNodeInfo.IP]++ s.generateLeafNodeInfoJSON() // Setup state that can enable shutdown s.leafNodeListener = l // As of now, a server that does not have remotes configured would // never solicit a connection, so we should not have to warn if // InsecureSkipVerify is set in main LeafNodes config (since // this TLS setting matters only when soliciting a connection). // Still, warn if insecure is set in any of LeafNode block. // We need to check remotes, even if tls is not required on accept. warn := tlsRequired && opts.LeafNode.TLSConfig.InsecureSkipVerify if !warn { for _, r := range opts.LeafNode.Remotes { if r.TLSConfig != nil && r.TLSConfig.InsecureSkipVerify { warn = true break } } } if warn { s.Warnf(leafnodeTLSInsecureWarning) } go s.acceptConnections(l, "Leafnode", func(conn net.Conn) { s.createLeafNode(conn, nil, nil, nil) }, nil) s.mu.Unlock() } // RegEx to match a creds file with user JWT and Seed. 
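// For illustration, a creds file (e.g. one generated by nsc) has this general
// shape; the first capture below matches the JWT, the second the seed (values
// elided, exact header text may vary):
//
//	-----BEGIN NATS USER JWT-----
//	eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ...
//	------END NATS USER JWT------
//	-----BEGIN USER NKEY SEED-----
//	SUA...
//	------END USER NKEY SEED------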
var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`)

// Lock should be held entering here.
func (c *client) sendLeafConnect(clusterName string, tlsRequired, headers bool) error {
	// We support basic user/pass and operator based user JWT with signatures.
	cinfo := leafConnectInfo{
		TLS:       tlsRequired,
		ID:        c.srv.info.ID,
		Name:      c.srv.info.Name,
		Hub:       c.leaf.remote.Hub,
		Cluster:   clusterName,
		Headers:   headers,
		JetStream: c.acc.jetStreamConfigured(),
		DenyPub:   c.leaf.remote.DenyImports,
	}
	// Check for credentials first, they will take precedence.
	if creds := c.leaf.remote.Credentials; creds != _EMPTY_ {
		c.Debugf("Authenticating with credentials file %q", c.leaf.remote.Credentials)
		contents, err := ioutil.ReadFile(creds)
		if err != nil {
			c.Errorf("%v", err)
			return err
		}
		defer wipeSlice(contents)
		items := credsRe.FindAllSubmatch(contents, -1)
		if len(items) < 2 {
			c.Errorf("Credentials file malformed")
			return fmt.Errorf("malformed credentials file %q", creds)
		}
		// First result should be the user JWT.
		// We copy here so that the file containing the seed will be wiped appropriately.
		raw := items[0][1]
		tmp := make([]byte, len(raw))
		copy(tmp, raw)
		// Seed is second item.
		kp, err := nkeys.FromSeed(items[1][1])
		if err != nil {
			c.Errorf("Credentials file has malformed seed")
			return err
		}
		// Wipe our key on exit.
		defer kp.Wipe()
		sigraw, _ := kp.Sign(c.nonce)
		sig := base64.RawURLEncoding.EncodeToString(sigraw)
		cinfo.JWT = string(tmp)
		cinfo.Sig = sig
	} else if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
		cinfo.User = userInfo.Username()
		cinfo.Pass, _ = userInfo.Password()
	} else if c.leaf.remote.username != _EMPTY_ {
		cinfo.User = c.leaf.remote.username
		cinfo.Pass = c.leaf.remote.password
	}
	b, err := json.Marshal(cinfo)
	if err != nil {
		c.Errorf("Error marshaling CONNECT to remote leafnode: %v\n", err)
		return err
	}
	// Although this call is made before the writeLoop is created,
	// we don't really need to send in place. The protocol will be
	// sent out by the writeLoop.
	c.enqueueProto([]byte(fmt.Sprintf(ConProto, b)))
	return nil
}

// Makes a deep copy of the LeafNode Info structure.
// The server lock is held on entry.
func (s *Server) copyLeafNodeInfo() *Info {
	clone := s.leafNodeInfo
	// Copy the array of urls.
	if len(s.leafNodeInfo.LeafNodeURLs) > 0 {
		clone.LeafNodeURLs = append([]string(nil), s.leafNodeInfo.LeafNodeURLs...)
	}
	return &clone
}

// Adds a LeafNode URL that we get when a route connects to the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was added or not.
// Server lock is held on entry
func (s *Server) addLeafNodeURL(urlStr string) bool {
	if s.leafURLsMap.addUrl(urlStr) {
		s.generateLeafNodeInfoJSON()
		return true
	}
	return false
}

// Removes a LeafNode URL of the route that is disconnecting from the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was removed or not.
// Server lock is held on entry.
func (s *Server) removeLeafNodeURL(urlStr string) bool {
	// Don't need to do this if we are removing the route connection because
	// we are shutting down...
if s.shutdown { return false } if s.leafURLsMap.removeUrl(urlStr) { s.generateLeafNodeInfoJSON() return true } return false } // Server lock is held on entry func (s *Server) generateLeafNodeInfoJSON() { s.leafNodeInfo.LeafNodeURLs = s.leafURLsMap.getAsStringSlice() s.leafNodeInfo.WSConnectURLs = s.websocket.connectURLsMap.getAsStringSlice() b, _ := json.Marshal(s.leafNodeInfo) pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)} s.leafNodeInfoJSON = bytes.Join(pcs, []byte(" ")) } // Sends an async INFO protocol so that the connected servers can update // their list of LeafNode urls. func (s *Server) sendAsyncLeafNodeInfo() { for _, c := range s.leafs { c.mu.Lock() c.enqueueProto(s.leafNodeInfoJSON) c.mu.Unlock() } } // Called when an inbound leafnode connection is accepted or we create one for a solicited leafnode. func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCfg, ws *websocket) *client { // Snapshot server options. opts := s.getOpts() maxPay := int32(opts.MaxPayload) maxSubs := int32(opts.MaxSubs) // For system, maxSubs of 0 means unlimited, so re-adjust here. if maxSubs == 0 { maxSubs = -1 } now := time.Now().UTC() c := &client{srv: s, nc: conn, kind: LEAF, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now} // Do not update the smap here, we need to do it in initLeafNodeSmapAndSendSubs c.leaf = &leaf{} // For accepted LN connections, ws will be != nil if it was accepted // through the Websocket port. c.ws = ws // For remote, check if the scheme starts with "ws", if so, we will initiate // a remote Leaf Node connection as a websocket connection. if remote != nil && rURL != nil && isWSURL(rURL) { remote.RLock() c.ws = &websocket{compress: remote.Websocket.Compression, maskwrite: !remote.Websocket.NoMasking} remote.RUnlock() } // Determines if we are soliciting the connection or not. var solicited bool var acc *Account var remoteSuffix string if remote != nil { // For now, if lookup fails, we will constantly try // to recreate this LN connection. remote.Lock() // Users can bind to any local account, if its empty // we will assume the $G account. if remote.LocalAccount == _EMPTY_ { remote.LocalAccount = globalAccountName } lacc := remote.LocalAccount remote.Unlock() var err error acc, err = s.LookupAccount(lacc) if err != nil { // An account not existing is something that can happen with nats/http account resolver and the account // has not yet been pushed, or the request failed for other reasons. // remote needs to be set or retry won't happen c.leaf.remote = remote c.closeConnection(MissingAccount) s.Errorf("Unable to lookup account %s for solicited leafnode connection: %v", lacc, err) return nil } remoteSuffix = fmt.Sprintf(" for account: %s", acc.traceLabel()) } c.mu.Lock() c.initClient() c.Noticef("Leafnode connection created%s", remoteSuffix) if remote != nil { solicited = true remote.Lock() c.leaf.remote = remote c.setPermissions(remote.perms) if !c.leaf.remote.Hub { c.leaf.isSpoke = true } remote.Unlock() c.acc = acc } else { c.flags.set(expectConnect) if ws != nil { c.Debugf("Leafnode compression=%v", c.ws.compress) } } c.mu.Unlock() var nonce [nonceLen]byte var info *Info // Grab this before the client lock below. if !solicited { // Grab server variables s.mu.Lock() info = s.copyLeafNodeInfo() s.generateNonce(nonce[:]) s.mu.Unlock() } // Grab lock c.mu.Lock() var preBuf []byte if solicited { // For websocket connection, we need to send an HTTP request, // and get the response before starting the readLoop to get // the INFO, etc.. 
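// (Solicited flow: the INFO is consumed by the readLoop and handled in
// processLeafnodeInfo; for accepted connections we send our INFO below.)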
if c.isWebsocket() { var err error var closeReason ClosedState preBuf, closeReason, err = c.leafNodeSolicitWSConnection(opts, rURL, remote) if err != nil { c.Errorf("Error soliciting websocket connection: %v", err) c.mu.Unlock() if closeReason != 0 { c.closeConnection(closeReason) } return nil } } else { // We need to wait for the info, but not for too long. c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT)) } // We will process the INFO from the readloop and finish by // sending the CONNECT and finish registration later. } else { // Send our info to the other side. // Remember the nonce we sent here for signatures, etc. c.nonce = make([]byte, nonceLen) copy(c.nonce, nonce[:]) info.Nonce = string(c.nonce) info.CID = c.cid b, _ := json.Marshal(info) pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)} // We have to send from this go routine because we may // have to block for TLS handshake before we start our // writeLoop go routine. The other side needs to receive // this before it can initiate the TLS handshake.. c.sendProtoNow(bytes.Join(pcs, []byte(" "))) // The above call could have marked the connection as closed (due to TCP error). if c.isClosed() { c.mu.Unlock() c.closeConnection(WriteError) return nil } // Check to see if we need to spin up TLS. if !c.isWebsocket() && info.TLSRequired { // Perform server-side TLS handshake. if err := c.doTLSServerHandshake("leafnode", opts.LeafNode.TLSConfig, opts.LeafNode.TLSTimeout, opts.LeafNode.TLSPinnedCerts); err != nil { c.mu.Unlock() return nil } } // Leaf nodes will always require a CONNECT to let us know // when we are properly bound to an account. c.setAuthTimer(secondsToDuration(opts.LeafNode.AuthTimeout)) } // Keep track in case server is shutdown before we can successfully register. if !s.addToTempClients(c.cid, c) { c.mu.Unlock() c.setNoReconnect() c.closeConnection(ServerShutdown) return nil } // Spin up the read loop. s.startGoRoutine(func() { c.readLoop(preBuf) }) // We will sping the write loop for solicited connections only // when processing the INFO and after switching to TLS if needed. if !solicited { s.startGoRoutine(func() { c.writeLoop() }) } c.mu.Unlock() return c } func (c *client) processLeafnodeInfo(info *Info) { s := c.srv opts := s.getOpts() hasSysShared, sysAcc := s.hasSystemRemoteLeaf(), s.SystemAccount() c.mu.Lock() if c.leaf == nil || c.isClosed() { c.mu.Unlock() return } var firstINFO bool // Mark that the INFO protocol has been received. // Note: For now, only the initial INFO has a nonce. We // will probably do auto key rotation at some point. if c.flags.setIfNotSet(infoReceived) { firstINFO = true // Prevent connecting to non leafnode port. Need to do this only for // the first INFO, not for async INFO updates... // // Content of INFO sent by the server when accepting a tcp connection. // ------------------------------------------------------------------- // Listen Port Of | CID | ClientConnectURLs | LeafNodeURLs | Gateway | // ------------------------------------------------------------------- // CLIENT | X* | X** | | | // ROUTE | | X** | X*** | | // GATEWAY | | | | X | // LEAFNODE | X | | X | | // ------------------------------------------------------------------- // * Not on older servers. // ** Not if "no advertise" is enabled. // *** Not if leafnode's "no advertise" is enabled. // // As seen from above, a solicited LeafNode connection should receive // from the remote server an INFO with CID and LeafNodeURLs. Anything // else should be considered an attempt to connect to a wrong port. 
if c.leaf.remote != nil && (info.CID == 0 || info.LeafNodeURLs == nil) { c.mu.Unlock() c.Errorf(ErrConnectedToWrongPort.Error()) c.closeConnection(WrongPort) return } // Capture a nonce here. c.nonce = []byte(info.Nonce) if info.TLSRequired && c.leaf.remote != nil { c.leaf.remote.TLS = true } supportsHeaders := c.srv.supportsHeaders() c.headers = supportsHeaders && info.Headers // Remember the remote server. // Pre 2.2.0 servers are not sending their server name. // In that case, use info.ID, which, for those servers, matches // the content of the field `Name` in the leafnode CONNECT protocol. if info.Name == _EMPTY_ { c.leaf.remoteServer = info.ID } else { c.leaf.remoteServer = info.Name } // Check for JetStream semantics to deny the JetStream API as needed. // This is so that if JetStream is enabled on both sides we can separately address both. hasJSDomain := opts.JetStreamDomain != _EMPTY_ inJSEnabledDomain := s.JetStreamEnabledForDomain() // Check for mixed mode scenarios to resolve presence of domain names. if !s.JetStreamEnabled() && inJSEnabledDomain && !hasJSDomain && s.jetStreamHasDomainConfigured() { hasJSDomain = true } if remote, acc := c.leaf.remote, c.acc; remote != nil { accHasJS := acc.jetStreamConfigured() remote.Lock() // JetStream checks for mappings and permissions updates. if acc != sysAcc { // Check if JetStream is enabled for this domain. If it's not, and the account // does not have JS, we can act as pass through, so do not deny. if hasSysShared && (inJSEnabledDomain || accHasJS) { s.addInJSDeny(remote) } else { // Here we want to suppress if this local account has JS enabled. // This is regardless of whether or not this server is actually running JS. // We only suppress export. But we do send an indication about our JetStream // status in the connect and the hub side will suppress as well if the remote // account also has JetStream enabled. if accHasJS { s.addInJSDenyExport(remote) // If we specified a domain do not import by default. if hasJSDomain { s.addInJSDenyImport(remote) } } } // If we have a specified JetStream domain we will want to add a mapping to // allow access cross domain for each non-system account. if hasJSDomain && accHasJS { src := fmt.Sprintf(jsDomainAPI, opts.JetStreamDomain) if err := acc.AddMapping(src, jsAllAPI); err != nil { c.Debugf("Error adding JetStream domain mapping: %v", err) } } } else if hasJSDomain && opts.JetStreamDomain != info.Domain { s.addInJSDenyAll(remote) } c.setPermissions(remote.perms) remote.Unlock() } } // For both initial INFO and async INFO protocols, Possibly // update our list of remote leafnode URLs we can connect to. if c.leaf.remote != nil && (len(info.LeafNodeURLs) > 0 || len(info.WSConnectURLs) > 0) { // Consider the incoming array as the most up-to-date // representation of the remote cluster's list of URLs. c.updateLeafNodeURLs(info) } // Check to see if we have permissions updates here. if info.Import != nil || info.Export != nil { perms := &Permissions{ Publish: info.Export, Subscribe: info.Import, } // Check if we have local deny clauses that we need to merge. if remote := c.leaf.remote; remote != nil { if len(remote.DenyExports) > 0 { if perms.Publish == nil { perms.Publish = &SubjectPermission{} } perms.Publish.Deny = append(perms.Publish.Deny, remote.DenyExports...) } if len(remote.DenyImports) > 0 { if perms.Subscribe == nil { perms.Subscribe = &SubjectPermission{} } perms.Subscribe.Deny = append(perms.Subscribe.Deny, remote.DenyImports...) 
} } c.setPermissions(perms) } var resumeConnect bool // If this is a remote connection and this is the first INFO protocol, // then we need to finish the connect process by sending CONNECT, etc.. if firstINFO && c.leaf.remote != nil { // Clear deadline that was set in createLeafNode while waiting for the INFO. c.nc.SetDeadline(time.Time{}) resumeConnect = true } // Check if we have the remote account information and if so make sure it's stored. if info.RemoteAccount != _EMPTY_ { s.leafRemoteAccounts.Store(c.acc.Name, info.RemoteAccount) } c.mu.Unlock() finishConnect := info.ConnectInfo if resumeConnect && s != nil { s.leafNodeResumeConnectProcess(c) if !info.InfoOnConnect { finishConnect = true } } if finishConnect { s.leafNodeFinishConnectProcess(c) } } // When getting a leaf node INFO protocol, use the provided // array of urls to update the list of possible endpoints. func (c *client) updateLeafNodeURLs(info *Info) { cfg := c.leaf.remote cfg.Lock() defer cfg.Unlock() // We have ensured that if a remote has a WS scheme, then all are. // So check if first is WS, then add WS URLs, otherwise, add non WS ones. if len(cfg.URLs) > 0 && isWSURL(cfg.URLs[0]) { // It does not really matter if we use "ws://" or "wss://" here since // we will have already marked that the remote should use TLS anyway. // But use proper scheme for log statements, etc... proto := wsSchemePrefix if cfg.TLS { proto = wsSchemePrefixTLS } c.doUpdateLNURLs(cfg, proto, info.WSConnectURLs) return } c.doUpdateLNURLs(cfg, "nats-leaf", info.LeafNodeURLs) } func (c *client) doUpdateLNURLs(cfg *leafNodeCfg, scheme string, URLs []string) { cfg.urls = make([]*url.URL, 0, 1+len(URLs)) // Add the ones we receive in the protocol for _, surl := range URLs { url, err := url.Parse(fmt.Sprintf("%s://%s", scheme, surl)) if err != nil { // As per below, the URLs we receive should not have contained URL info, so this should be safe to log. c.Errorf("Error parsing url %q: %v", surl, err) continue } // Do not add if it's the same as what we already have configured. var dup bool for _, u := range cfg.URLs { // URLs that we receive never have user info, but the // ones that were configured may have. Simply compare // host and port to decide if they are equal or not. if url.Host == u.Host && url.Port() == u.Port() { dup = true break } } if !dup { cfg.urls = append(cfg.urls, url) cfg.saveTLSHostname(url) } } // Add the configured one cfg.urls = append(cfg.urls, cfg.URLs...) } // Similar to setInfoHostPortAndGenerateJSON, but for leafNodeInfo. func (s *Server) setLeafNodeInfoHostPortAndIP() error { opts := s.getOpts() if opts.LeafNode.Advertise != _EMPTY_ { advHost, advPort, err := parseHostPort(opts.LeafNode.Advertise, opts.LeafNode.Port) if err != nil { return err } s.leafNodeInfo.Host = advHost s.leafNodeInfo.Port = advPort } else { s.leafNodeInfo.Host = opts.LeafNode.Host s.leafNodeInfo.Port = opts.LeafNode.Port // If the host is "0.0.0.0" or "::" we need to resolve to a public IP. // This will return at most 1 IP. hostIsIPAny, ips, err := s.getNonLocalIPsIfHostIsIPAny(s.leafNodeInfo.Host, false) if err != nil { return err } if hostIsIPAny { if len(ips) == 0 { s.Errorf("Could not find any non-local IP for leafnode's listen specification %q", s.leafNodeInfo.Host) } else { // Take the first from the list... 
s.leafNodeInfo.Host = ips[0] } } } // Use just host:port for the IP s.leafNodeInfo.IP = net.JoinHostPort(s.leafNodeInfo.Host, strconv.Itoa(s.leafNodeInfo.Port)) if opts.LeafNode.Advertise != _EMPTY_ { s.Noticef("Advertise address for leafnode is set to %s", s.leafNodeInfo.IP) } return nil } // Add the connection to the map of leaf nodes. // If `checkForDup` is true (invoked when a leafnode is accepted), then we check // if a connection already exists for the same server name and account. // That can happen when the remote is attempting to reconnect while the accepting // side did not detect the connection as broken yet. // But it can also happen when there is a misconfiguration and the remote is // creating two (or more) connections that bind to the same account on the accept // side. // When a duplicate is found, the new connection is accepted and the old is closed // (this solves the stale connection situation). An error is returned to help the // remote detect the misconfiguration when the duplicate is the result of that // misconfiguration. func (s *Server) addLeafNodeConnection(c *client, srvName, clusterName string, checkForDup bool) { var accName string c.mu.Lock() cid := c.cid if c.acc != nil { accName = c.acc.Name } c.mu.Unlock() var old *client s.mu.Lock() // We check for empty because in some test we may send empty CONNECT{} if checkForDup && srvName != _EMPTY_ { for _, ol := range s.leafs { ol.mu.Lock() // We care here only about non solicited Leafnode. This function // is more about replacing stale connections than detecting loops. // We have code for the loop detection elsewhere, which also delays // attempt to reconnect. if !ol.isSolicitedLeafNode() && ol.leaf.remoteServer == srvName && ol.leaf.remoteCluster == clusterName && ol.acc.Name == accName { old = ol } ol.mu.Unlock() if old != nil { break } } } // Store new connection in the map s.leafs[cid] = c s.mu.Unlock() s.removeFromTempClients(cid) // If applicable, evict the old one. if old != nil { old.sendErrAndErr(DuplicateRemoteLeafnodeConnection.String()) old.closeConnection(DuplicateRemoteLeafnodeConnection) c.Warnf("Replacing connection from same server") } } func (s *Server) removeLeafNodeConnection(c *client) { c.mu.Lock() cid := c.cid if c.leaf != nil && c.leaf.tsubt != nil { c.leaf.tsubt.Stop() c.leaf.tsubt = nil } c.mu.Unlock() s.mu.Lock() delete(s.leafs, cid) s.mu.Unlock() s.removeFromTempClients(cid) } // Connect information for solicited leafnodes. type leafConnectInfo struct { JWT string `json:"jwt,omitempty"` Sig string `json:"sig,omitempty"` User string `json:"user,omitempty"` Pass string `json:"pass,omitempty"` TLS bool `json:"tls_required"` Comp bool `json:"compression,omitempty"` ID string `json:"server_id,omitempty"` Name string `json:"name,omitempty"` Hub bool `json:"is_hub,omitempty"` Cluster string `json:"cluster,omitempty"` Headers bool `json:"headers,omitempty"` JetStream bool `json:"jetstream,omitempty"` DenyPub []string `json:"deny_pub,omitempty"` // Just used to detect wrong connection attempts. Gateway string `json:"gateway,omitempty"` } // processLeafNodeConnect will process the inbound connect args. // Once we are here we are bound to an account, so can send any interest that // we would have to the other side. func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) error { // Way to detect clients that incorrectly connect to the route listen // port. Client provided "lang" in the CONNECT protocol while LEAFNODEs don't. 
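// (A regular client CONNECT includes e.g. {"lang":"go",...}; leafnode
// CONNECTs never set "lang", which is what this check relies on.)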
	if lang != _EMPTY_ {
		c.sendErrAndErr(ErrClientConnectedToLeafNodePort.Error())
		c.closeConnection(WrongPort)
		return ErrClientConnectedToLeafNodePort
	}

	// Unmarshal as a leaf node connect protocol
	proto := &leafConnectInfo{}
	if err := json.Unmarshal(arg, proto); err != nil {
		return err
	}

	// Reject if this has Gateway which means that it would be from a gateway
	// connection that incorrectly connects to the leafnode port.
	if proto.Gateway != _EMPTY_ {
		errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the leafnode port", proto.Gateway)
		c.Errorf(errTxt)
		c.sendErr(errTxt)
		c.closeConnection(WrongGateway)
		return ErrWrongGateway
	}

	// Check if this server supports headers.
	supportHeaders := c.srv.supportsHeaders()

	// Grab system account and server options.
	sysAcc, opts := s.SystemAccount(), s.getOpts()

	c.mu.Lock()
	// Leaf Nodes do not do echo or verbose or pedantic.
	c.opts.Verbose = false
	c.opts.Echo = false
	c.opts.Pedantic = false

	// This inbound connection will be marked as supporting headers if this server
	// supports headers and the remote has sent in the CONNECT protocol that it
	// supports headers too.
	c.headers = supportHeaders && proto.Headers

	// Remember the remote server.
	c.leaf.remoteServer = proto.Name

	// If the other side has declared itself a hub, we will take on the spoke role.
	if proto.Hub {
		c.leaf.isSpoke = true
	}

	// The soliciting side is part of a cluster.
	if proto.Cluster != _EMPTY_ {
		c.leaf.remoteCluster = proto.Cluster
	}

	// When a leaf solicits a connection to a hub, the perms that it will use on the
	// soliciting leafnode's behalf are correct for them, but inside the hub they need
	// to be reversed since data is flowing in the opposite direction.
	if !c.isSolicitedLeafNode() && c.perms != nil {
		sp, pp := c.perms.sub, c.perms.pub
		c.perms.sub, c.perms.pub = pp, sp
		if c.opts.Import != nil {
			c.darray = c.opts.Import.Deny
		} else {
			c.darray = nil
		}
	}

	// Check for JetStream domain
	jsConfigured := c.acc.jetStreamConfigured()
	doDomainMappings := opts.JetStreamDomain != _EMPTY_ && c.acc != sysAcc && jsConfigured

	// If we have JS enabled and the other side does as well we need to add in an import deny clause.
	if jsConfigured && proto.JetStream {
		c.mergePubDenyPermissions([]string{jsAllAPI})
		// We need to send this back to the other side.
		if c.isHubLeafNode() {
			if c.opts.Import == nil {
				c.opts.Import = &SubjectPermission{}
			}
			c.opts.Import.Deny = append(c.opts.Import.Deny, jsAllAPI)
		}
	}

	// Set the Ping timer
	s.setFirstPingTimer(c)

	// If we received pub deny permissions from the other end, merge with existing ones.
	c.mergePubDenyPermissions(proto.DenyPub)

	acc := c.acc
	c.mu.Unlock()

	// Add in the leafnode here since we passed through auth at this point.
	s.addLeafNodeConnection(c, proto.Name, proto.Cluster, true)

	// If we have permissions bound to this leafnode we need to send them back to the
	// origin server for local enforcement.
	s.sendPermsAndAccountInfo(c)

	// Create and initialize the smap since we know our bound account now.
	// This will send all registered subs too.
	s.initLeafNodeSmapAndSendSubs(c)

	// Announce the account connect event for a leaf node.
	// This will no-op as needed.
	s.sendLeafNodeConnect(c.acc)

	// If we have a specified JetStream domain we will want to add a mapping to
	// allow access cross domain for each non-system account.
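	// For example, with JetStreamDomain "hub" (hypothetical), this maps requests
	// on "$JS.hub.API.>" onto the local "$JS.API.>" for the account, assuming
	// the usual "$JS.<domain>.API.>" layout of jsDomainAPI.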
if doDomainMappings { src := fmt.Sprintf(jsDomainAPI, opts.JetStreamDomain) if err := acc.AddMapping(src, jsAllAPI); err != nil { c.Debugf("Error adding JetStream domain mapping: %v", err) } } return nil } // Returns the remote cluster name. This is set only once so does not require a lock. func (c *client) remoteCluster() string { if c.leaf == nil { return _EMPTY_ } return c.leaf.remoteCluster } // Sends back an info block to the soliciting leafnode to let it know about // its permission settings for local enforcement. func (s *Server) sendPermsAndAccountInfo(c *client) { // Copy info := s.copyLeafNodeInfo() c.mu.Lock() info.CID = c.cid info.Import = c.opts.Import info.Export = c.opts.Export info.RemoteAccount = c.acc.Name info.ConnectInfo = true b, _ := json.Marshal(info) pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)} c.enqueueProto(bytes.Join(pcs, []byte(" "))) c.mu.Unlock() } // Snapshot the current subscriptions from the sublist into our smap which // we will keep updated from now on. // Also send the registered subscriptions. func (s *Server) initLeafNodeSmapAndSendSubs(c *client) { acc := c.acc if acc == nil { c.Debugf("Leafnode does not have an account bound") return } // Collect all account subs here. _subs := [32]*subscription{} subs := _subs[:0] ims := []string{} acc.mu.Lock() accName := acc.Name accNTag := acc.nameTag // To make printing look better when no friendly name present. if accNTag != _EMPTY_ { accNTag = "/" + accNTag } // If we are solicited we only send interest for local clients. if c.isSpokeLeafNode() { acc.sl.localSubs(&subs, true) } else { acc.sl.All(&subs) } // Check if we have an existing service import reply. siReply := acc.siReply // Since leaf nodes only send on interest, if the bound // account has import services we need to send those over. for isubj := range acc.imports.services { if c.isSpokeLeafNode() && !c.canSubscribe(isubj) { c.Debugf("Not permitted to import service %q on behalf of %s%s", isubj, accName, accNTag) continue } ims = append(ims, isubj) } // Likewise for mappings. for _, m := range acc.mappings { ims = append(ims, m.src) } // Create a unique subject that will be used for loop detection. lds := acc.lds if lds == _EMPTY_ { lds = leafNodeLoopDetectionSubjectPrefix + nuid.Next() acc.lds = lds } acc.mu.Unlock() // Now check for gateway interest. Leafnodes will put this into // the proper mode to propagate, but they are not held in the account. gwsa := [16]*client{} gws := gwsa[:0] s.getOutboundGatewayConnections(&gws) for _, cgw := range gws { cgw.mu.Lock() gw := cgw.gw cgw.mu.Unlock() if gw != nil { if ei, _ := gw.outsim.Load(accName); ei != nil { if e := ei.(*outsie); e != nil && e.sl != nil { e.sl.All(&subs) } } } } applyGlobalRouting := s.gateway.enabled if c.isSpokeLeafNode() { // Add a fake subscription for this solicited leafnode connection // so that we can send back directly for mapped GW replies. c.srv.gwLeafSubs.Insert(&subscription{client: c, subject: []byte(gwReplyPrefix + ">")}) } // Now walk the results and add them to our smap c.mu.Lock() rc := c.leaf.remoteCluster c.leaf.smap = make(map[string]int32) for _, sub := range subs { subj := string(sub.subject) if c.isSpokeLeafNode() && !c.canSubscribe(subj) { c.Debugf("Not permitted to subscribe to %q on behalf of %s%s", subj, accName, accNTag) continue } // We ignore ourselves here. // Also don't add the subscription if it has a origin cluster and the // cluster name matches the one of the client we are sending to. 
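// (This avoids echoing interest back toward the cluster it originated from,
// which could otherwise create subscription loops across leaf connections.)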
if c != sub.client && (sub.origin == nil || (string(sub.origin) != rc)) { c.leaf.smap[keyFromSub(sub)]++ if c.leaf.tsub == nil { c.leaf.tsub = make(map[*subscription]struct{}) } c.leaf.tsub[sub] = struct{}{} } } // FIXME(dlc) - We need to update appropriately on an account claims update. for _, isubj := range ims { c.leaf.smap[isubj]++ } // If we have gateways enabled we need to make sure the other side sends us responses // that have been augmented from the original subscription. // TODO(dlc) - Should we lock this down more? if applyGlobalRouting { c.leaf.smap[oldGWReplyPrefix+"*.>"]++ c.leaf.smap[gwReplyPrefix+">"]++ } // Detect loop by subscribing to a specific subject and checking // if this is coming back to us. c.leaf.smap[lds]++ // Check if we need to add an existing siReply to our map. // This will be a prefix so add on the wildcard. if siReply != nil { wcsub := append(siReply, '>') c.leaf.smap[string(wcsub)]++ } // Queue all protocols. There is no max pending limit for LN connection, // so we don't need chunking. The writes will happen from the writeLoop. var b bytes.Buffer for key, n := range c.leaf.smap { c.writeLeafSub(&b, key, n) } if b.Len() > 0 { c.enqueueProto(b.Bytes()) } if c.leaf.tsub != nil { // Clear the tsub map after 5 seconds. c.leaf.tsubt = time.AfterFunc(5*time.Second, func() { c.mu.Lock() if c.leaf != nil { c.leaf.tsub = nil c.leaf.tsubt = nil } c.mu.Unlock() }) } c.mu.Unlock() } // updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-. func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscription, delta int32) { acc, err := s.LookupAccount(accName) if acc == nil || err != nil { s.Debugf("No or bad account for %q, failed to update interest from gateway", accName) return } s.updateLeafNodes(acc, sub, delta) } // updateLeafNodes will make sure to update the smap for the subscription. Will // also forward to all leaf nodes as needed. func (s *Server) updateLeafNodes(acc *Account, sub *subscription, delta int32) { if acc == nil || sub == nil { return } _l := [32]*client{} leafs := _l[:0] // Grab all leaf nodes. Ignore a leafnode if sub's client is a leafnode and matches. acc.mu.RLock() for _, ln := range acc.lleafs { if ln != sub.client { leafs = append(leafs, ln) } } acc.mu.RUnlock() for _, ln := range leafs { // Check to make sure this sub does not have an origin cluster than matches the leafnode. ln.mu.Lock() skip := (sub.origin != nil && string(sub.origin) == ln.remoteCluster()) || !ln.canSubscribe(string(sub.subject)) ln.mu.Unlock() if skip { continue } ln.updateSmap(sub, delta) } } // This will make an update to our internal smap and determine if we should send out // an interest update to the remote side. 
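// On the wire an update becomes, for example, "LS+ foo" for a new plain
// subscription, "LS+ foo bar 3" for queue group "bar" at weight 3, or
// "LS- foo" once interest drops to zero (illustrative subjects; see
// writeLeafSub below).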
func (c *client) updateSmap(sub *subscription, delta int32) { key := keyFromSub(sub) c.mu.Lock() if c.leaf.smap == nil { c.mu.Unlock() return } // If we are solicited make sure this is a local client or a non-solicited leaf node skind := sub.client.kind updateClient := skind == CLIENT || skind == SYSTEM || skind == JETSTREAM || skind == ACCOUNT if c.isSpokeLeafNode() && !(updateClient || (skind == LEAF && !sub.client.isSpokeLeafNode())) { c.mu.Unlock() return } // For additions, check if that sub has just been processed during initLeafNodeSmapAndSendSubs if delta > 0 && c.leaf.tsub != nil { if _, present := c.leaf.tsub[sub]; present { delete(c.leaf.tsub, sub) if len(c.leaf.tsub) == 0 { c.leaf.tsub = nil c.leaf.tsubt.Stop() c.leaf.tsubt = nil } c.mu.Unlock() return } } n := c.leaf.smap[key] // We will update if its a queue, if count is zero (or negative), or we were 0 and are N > 0. update := sub.queue != nil || n == 0 || n+delta <= 0 n += delta if n > 0 { c.leaf.smap[key] = n } else { delete(c.leaf.smap, key) } if update { c.sendLeafNodeSubUpdate(key, n) } c.mu.Unlock() } // Used to force add subjects to the subject map. func (c *client) forceAddToSmap(subj string) { c.mu.Lock() defer c.mu.Unlock() n := c.leaf.smap[subj] if n != 0 { return } // Place into the map since it was not there. c.leaf.smap[subj] = 1 c.sendLeafNodeSubUpdate(subj, 1) } // Send the subscription interest change to the other side. // Lock should be held. func (c *client) sendLeafNodeSubUpdate(key string, n int32) { // If we are a spoke, we need to check if we are allowed to send this subscription over to the hub. if c.isSpokeLeafNode() { checkPerms := true if len(key) > 0 && (key[0] == '$' || key[0] == '_') { if strings.HasPrefix(key, leafNodeLoopDetectionSubjectPrefix) || strings.HasPrefix(key, oldGWReplyPrefix) || strings.HasPrefix(key, gwReplyPrefix) { checkPerms = false } } if checkPerms && !c.canSubscribe(key) { return } } // If we are here we can send over to the other side. _b := [64]byte{} b := bytes.NewBuffer(_b[:0]) c.writeLeafSub(b, key, n) c.enqueueProto(b.Bytes()) } // Helper function to build the key. func keyFromSub(sub *subscription) string { var _rkey [1024]byte var key []byte if sub.queue != nil { // Just make the key subject spc group, e.g. 'foo bar' key = _rkey[:0] key = append(key, sub.subject...) key = append(key, byte(' ')) key = append(key, sub.queue...) } else { key = sub.subject } return string(key) } // Lock should be held. func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) { if key == _EMPTY_ { return } if n > 0 { w.WriteString("LS+ " + key) // Check for queue semantics, if found write n. if strings.Contains(key, " ") { w.WriteString(" ") var b [12]byte var i = len(b) for l := n; l > 0; l /= 10 { i-- b[i] = digits[l%10] } w.Write(b[i:]) if c.trace { arg := fmt.Sprintf("%s %d", key, n) c.traceOutOp("LS+", []byte(arg)) } } else if c.trace { c.traceOutOp("LS+", []byte(key)) } } else { w.WriteString("LS- " + key) if c.trace { c.traceOutOp("LS-", []byte(key)) } } w.WriteString(CR_LF) } // processLeafSub will process an inbound sub request for the remote leaf node. func (c *client) processLeafSub(argo []byte) (err error) { // Indicate activity. 
c.in.subs++ srv := c.srv if srv == nil { return nil } // Copy so we do not reference a potentially large buffer arg := make([]byte, len(argo)) copy(arg, argo) args := splitArg(arg) sub := &subscription{client: c} switch len(args) { case 1: sub.queue = nil case 3: sub.queue = args[1] sub.qw = int32(parseSize(args[2])) default: return fmt.Errorf("processLeafSub Parse Error: '%s'", arg) } sub.subject = args[0] c.mu.Lock() if c.isClosed() { c.mu.Unlock() return nil } acc := c.acc // Check if we have a loop. ldsPrefix := bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix)) if ldsPrefix && string(sub.subject) == acc.getLDSubject() { c.mu.Unlock() c.handleLeafNodeLoop(true) return nil } // Check permissions if applicable. (but exclude the $LDS, $GR and _GR_) checkPerms := true if sub.subject[0] == '$' || sub.subject[0] == '_' { if ldsPrefix || bytes.HasPrefix(sub.subject, []byte(oldGWReplyPrefix)) || bytes.HasPrefix(sub.subject, []byte(gwReplyPrefix)) { checkPerms = false } } // If we are a hub check that we can publish to this subject. if checkPerms && subjectIsLiteral(string(sub.subject)) && !c.pubAllowedFullCheck(string(sub.subject), true, true) { c.mu.Unlock() c.leafSubPermViolation(sub.subject) return nil } // Check if we have a maximum on the number of subscriptions. if c.subsAtLimit() { c.mu.Unlock() c.maxSubsExceeded() return nil } // If we have an origin cluster associated mark that in the sub. if rc := c.remoteCluster(); rc != _EMPTY_ { sub.origin = []byte(rc) } // Like Routes, we store local subs by account and subject and optionally queue name. // If we have a queue it will have a trailing weight which we do not want. if sub.queue != nil { sub.sid = arg[:len(arg)-len(args[2])-1] } else { sub.sid = arg } key := string(sub.sid) osub := c.subs[key] updateGWs := false delta := int32(1) if osub == nil { c.subs[key] = sub // Now place into the account sl. if err := acc.sl.Insert(sub); err != nil { delete(c.subs, key) c.mu.Unlock() c.Errorf("Could not insert subscription: %v", err) c.sendErr("Invalid Subscription") return nil } updateGWs = srv.gateway.enabled } else if sub.queue != nil { // For a queue we need to update the weight. delta = sub.qw - atomic.LoadInt32(&osub.qw) atomic.StoreInt32(&osub.qw, sub.qw) acc.sl.UpdateRemoteQSub(osub) } spoke := c.isSpokeLeafNode() c.mu.Unlock() if err := c.addShadowSubscriptions(acc, sub); err != nil { c.Errorf(err.Error()) } // If we are not solicited, treat leaf node subscriptions similar to a // client subscription, meaning we forward them to routes, gateways and // other leaf nodes as needed. if !spoke { // If we are routing add to the route map for the associated account. srv.updateRouteSubscriptionMap(acc, sub, delta) if updateGWs { srv.gatewayUpdateSubInterest(acc.Name, sub, delta) } } // Now check on leafnode updates for other leaf nodes. We understand solicited // and non-solicited state in this call so we will do the right thing. srv.updateLeafNodes(acc, sub, delta) return nil } // If the leafnode is a solicited, set the connect delay based on default // or private option (for tests). Sends the error to the other side, log and // close the connection. func (c *client) handleLeafNodeLoop(sendErr bool) { accName, delay := c.setLeafConnectDelayIfSoliciting(leafNodeReconnectDelayAfterLoopDetected) errTxt := fmt.Sprintf("Loop detected for leafnode account=%q. 
Delaying attempt to reconnect for %v", accName, delay) if sendErr { c.sendErr(errTxt) } c.Errorf(errTxt) // If we are here with "sendErr" false, it means that this is the server // that received the error. The other side will have closed the connection, // but does not hurt to close here too. c.closeConnection(ProtocolViolation) } // processLeafUnsub will process an inbound unsub request for the remote leaf node. func (c *client) processLeafUnsub(arg []byte) error { // Indicate any activity, so pub and sub or unsubs. c.in.subs++ acc := c.acc srv := c.srv c.mu.Lock() if c.isClosed() { c.mu.Unlock() return nil } updateGWs := false spoke := c.isSpokeLeafNode() // We store local subs by account and subject and optionally queue name. // LS- will have the arg exactly as the key. sub, ok := c.subs[string(arg)] c.mu.Unlock() if ok { c.unsubscribe(acc, sub, true, true) updateGWs = srv.gateway.enabled } if !spoke { // If we are routing subtract from the route map for the associated account. srv.updateRouteSubscriptionMap(acc, sub, -1) // Gateways if updateGWs { srv.gatewayUpdateSubInterest(acc.Name, sub, -1) } } // Now check on leafnode updates for other leaf nodes. srv.updateLeafNodes(acc, sub, -1) return nil } func (c *client) processLeafHeaderMsgArgs(arg []byte) error { // Unroll splitArgs to avoid runtime/heap issues a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } c.pa.arg = arg switch len(args) { case 0, 1, 2: return fmt.Errorf("processLeafHeaderMsgArgs Parse Error: '%s'", args) case 3: c.pa.reply = nil c.pa.queues = nil c.pa.hdb = args[1] c.pa.hdr = parseSize(args[1]) c.pa.szb = args[2] c.pa.size = parseSize(args[2]) case 4: c.pa.reply = args[1] c.pa.queues = nil c.pa.hdb = args[2] c.pa.hdr = parseSize(args[2]) c.pa.szb = args[3] c.pa.size = parseSize(args[3]) default: // args[1] is our reply indicator. Should be + or | normally. if len(args[1]) != 1 { return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1]) } switch args[1][0] { case '+': c.pa.reply = args[2] case '|': c.pa.reply = nil default: return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1]) } // Grab header size. c.pa.hdb = args[len(args)-2] c.pa.hdr = parseSize(c.pa.hdb) // Grab size. c.pa.szb = args[len(args)-1] c.pa.size = parseSize(c.pa.szb) // Grab queue names. 
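// e.g. for args "foo + reply q1 q2 10 42" (hypothetical), the reply is
// "reply", the queues are ["q1", "q2"], the header size 10 and total size 42.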
if c.pa.reply != nil { c.pa.queues = args[3 : len(args)-2] } else { c.pa.queues = args[2 : len(args)-2] } } if c.pa.hdr < 0 { return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Header Size: '%s'", arg) } if c.pa.size < 0 { return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Size: '%s'", args) } if c.pa.hdr > c.pa.size { return fmt.Errorf("processLeafHeaderMsgArgs Header Size larger then TotalSize: '%s'", arg) } // Common ones processed after check for arg length c.pa.subject = args[0] return nil } func (c *client) processLeafMsgArgs(arg []byte) error { // Unroll splitArgs to avoid runtime/heap issues a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } c.pa.arg = arg switch len(args) { case 0, 1: return fmt.Errorf("processLeafMsgArgs Parse Error: '%s'", args) case 2: c.pa.reply = nil c.pa.queues = nil c.pa.szb = args[1] c.pa.size = parseSize(args[1]) case 3: c.pa.reply = args[1] c.pa.queues = nil c.pa.szb = args[2] c.pa.size = parseSize(args[2]) default: // args[1] is our reply indicator. Should be + or | normally. if len(args[1]) != 1 { return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1]) } switch args[1][0] { case '+': c.pa.reply = args[2] case '|': c.pa.reply = nil default: return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1]) } // Grab size. c.pa.szb = args[len(args)-1] c.pa.size = parseSize(c.pa.szb) // Grab queue names. if c.pa.reply != nil { c.pa.queues = args[3 : len(args)-1] } else { c.pa.queues = args[2 : len(args)-1] } } if c.pa.size < 0 { return fmt.Errorf("processLeafMsgArgs Bad or Missing Size: '%s'", args) } // Common ones processed after check for arg length c.pa.subject = args[0] return nil } // processInboundLeafMsg is called to process an inbound msg from a leaf node. func (c *client) processInboundLeafMsg(msg []byte) { // Update statistics // The msg includes the CR_LF, so pull back out for accounting. c.in.msgs++ c.in.bytes += int32(len(msg) - LEN_CR_LF) srv, acc, subject := c.srv, c.acc, string(c.pa.subject) // Mostly under testing scenarios. if srv == nil || acc == nil { return } // Match the subscriptions. We will use our own L1 map if // it's still valid, avoiding contention on the shared sublist. var r *SublistResult var ok bool genid := atomic.LoadUint64(&c.acc.sl.genid) if genid == c.in.genid && c.in.results != nil { r, ok = c.in.results[subject] } else { // Reset our L1 completely. c.in.results = make(map[string]*SublistResult) c.in.genid = genid } // Go back to the sublist data structure. if !ok { r = c.acc.sl.Match(subject) c.in.results[subject] = r // Prune the results cache. Keeps us from unbounded growth. Random delete. if len(c.in.results) > maxResultCacheSize { n := 0 for subj := range c.in.results { delete(c.in.results, subj) if n++; n > pruneSize { break } } } } // Collect queue names if needed. var qnames [][]byte // Check for no interest, short circuit if so. // This is the fanout scale. if len(r.psubs)+len(r.qsubs) > 0 { flag := pmrNoFlag // If we have queue subs in this cluster, then if we run in gateway // mode and the remote gateways have queue subs, then we need to // collect the queue groups this message was sent to so that we // exclude them when sending to gateways. 
if len(r.qsubs) > 0 && c.srv.gateway.enabled && atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 { flag |= pmrCollectQueueNames } _, qnames = c.processMsgResults(acc, r, msg, nil, c.pa.subject, c.pa.reply, flag) } // Now deal with gateways if c.srv.gateway.enabled { c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames) } } // Handles a subscription permission violation. // See leafPermViolation() for details. func (c *client) leafSubPermViolation(subj []byte) { c.leafPermViolation(false, subj) } // Common function to process publish or subscribe leafnode permission violation. // Sends the permission violation error to the remote, logs it and closes the connection. // If this is from a server soliciting, the reconnection will be delayed. func (c *client) leafPermViolation(pub bool, subj []byte) { if c.isSpokeLeafNode() { // For spokes these are no-ops since the hub server told us our permissions. // We just need to not send these over to the other side since we will get cutoff. return } // FIXME(dlc) ? c.setLeafConnectDelayIfSoliciting(leafNodeReconnectAfterPermViolation) var action string if pub { c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subj)) action = "Publish" } else { c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", subj)) action = "Subscription" } c.Errorf("%s Violation on %q - Check other side configuration", action, subj) // TODO: add a new close reason that is more appropriate? c.closeConnection(ProtocolViolation) } // Invoked from generic processErr() for LEAF connections. func (c *client) leafProcessErr(errStr string) { // We will look for Loop detected error coming from the other side. // If we solicit, set the connect delay. if !strings.Contains(errStr, "Loop detected") { return } c.handleLeafNodeLoop(false) } // If this leaf connection solicits, sets the connect delay to the given value, // or the one from the server option's LeafNode.connDelay if one is set (for tests). // Returns the connection's account name and delay. func (c *client) setLeafConnectDelayIfSoliciting(delay time.Duration) (string, time.Duration) { c.mu.Lock() if c.isSolicitedLeafNode() { if s := c.srv; s != nil { if srvdelay := s.getOpts().LeafNode.connDelay; srvdelay != 0 { delay = srvdelay } } c.leaf.remote.setConnectDelay(delay) } accName := c.acc.Name c.mu.Unlock() return accName, delay } // For the given remote Leafnode configuration, this function returns // if TLS is required, and if so, will return a clone of the TLS Config // (since some fields will be changed during handshake), the TLS server // name that is remembered, and the TLS timeout. 
func (c *client) leafNodeGetTLSConfigForSolicit(remote *leafNodeCfg, needsLock bool) (bool, *tls.Config, string, float64) {
	var (
		tlsConfig  *tls.Config
		tlsName    string
		tlsTimeout float64
	)
	if needsLock {
		remote.RLock()
	}
	tlsRequired := remote.TLS || remote.TLSConfig != nil
	if tlsRequired {
		if remote.TLSConfig != nil {
			tlsConfig = remote.TLSConfig.Clone()
		} else {
			tlsConfig = &tls.Config{MinVersion: tls.VersionTLS12}
		}
		tlsName = remote.tlsName
		tlsTimeout = remote.TLSTimeout
		if tlsTimeout == 0 {
			tlsTimeout = float64(TLS_TIMEOUT / time.Second)
		}
	}
	if needsLock {
		remote.RUnlock()
	}
	return tlsRequired, tlsConfig, tlsName, tlsTimeout
}

// Initiates the LeafNode Websocket connection by:
// - doing the TLS handshake if needed
// - sending the HTTP request
// - waiting for the HTTP response
//
// Since a bufio reader is used to consume the HTTP response, this function
// returns the slice of buffered bytes (if any) so that the readLoop that will
// be started after that consumes those first before reading from the socket.
//
// Lock held on entry.
func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remote *leafNodeCfg) ([]byte, ClosedState, error) {
	remote.RLock()
	compress := remote.Websocket.Compression
	// By default the server will mask outbound frames, but it can be disabled with this option.
	noMasking := remote.Websocket.NoMasking
	tlsRequired, tlsConfig, tlsName, tlsTimeout := c.leafNodeGetTLSConfigForSolicit(remote, false)
	remote.RUnlock()

	// Do TLS here as needed.
	if tlsRequired {
		// Perform the client-side TLS handshake.
		if resetTLSName, err := c.doTLSClientHandshake("leafnode", rURL, tlsConfig, tlsName, tlsTimeout, opts.LeafNode.TLSPinnedCerts); err != nil {
			// Check if we need to reset the remote's TLS name.
			if resetTLSName {
				remote.Lock()
				remote.tlsName = _EMPTY_
				remote.Unlock()
			}
			// 0 will indicate that the connection was already closed
			return nil, 0, err
		}
	}

	// For http request, we need the passed URL to contain either http or https scheme.
	scheme := "http"
	if tlsRequired {
		scheme = "https"
	}

	// We will use the `/leafnode` path to tell the accepting WS server that it should
	// create a LEAF connection, not a CLIENT.
	// In case we use the user's URL path in the future, make sure we append the user's
	// path to our `/leafnode` path.
path := leafNodeWSPath if curPath := rURL.EscapedPath(); curPath != _EMPTY_ { if curPath[0] == '/' { curPath = curPath[1:] } path += curPath } ustr := fmt.Sprintf("%s://%s%s", scheme, rURL.Host, path) u, _ := url.Parse(ustr) req := &http.Request{ Method: "GET", URL: u, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, Header: make(http.Header), Host: u.Host, } wsKey, err := wsMakeChallengeKey() if err != nil { return nil, WriteError, err } req.Header["Upgrade"] = []string{"websocket"} req.Header["Connection"] = []string{"Upgrade"} req.Header["Sec-WebSocket-Key"] = []string{wsKey} req.Header["Sec-WebSocket-Version"] = []string{"13"} if compress { req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue) } if noMasking { req.Header.Add(wsNoMaskingHeader, wsNoMaskingValue) } if err := req.Write(c.nc); err != nil { return nil, WriteError, err } var resp *http.Response br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE) c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT)) resp, err = http.ReadResponse(br, req) if err == nil && (resp.StatusCode != 101 || !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) { err = fmt.Errorf("invalid websocket connection") } // Check compression extension... if err == nil && c.ws.compress { // Check that not only permessage-deflate extension is present, but that // we also have server and client no context take over. srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header, false) // If server does not support compression, then simply disable it in our side. if !srvCompress { c.ws.compress = false } else if !noCtxTakeover { err = fmt.Errorf("compression negotiation error") } } // Same for no masking... if err == nil && noMasking { // Check if server accepts no masking if resp.Header.Get(wsNoMaskingHeader) != wsNoMaskingValue { // Nope, need to mask our writes as any client would do. c.ws.maskwrite = true } } if resp != nil { resp.Body.Close() } if err != nil { return nil, ReadError, err } c.Debugf("Leafnode compression=%v masking=%v", c.ws.compress, c.ws.maskwrite) var preBuf []byte // We have to slurp whatever is in the bufio reader and pass that to the readloop. if n := br.Buffered(); n != 0 { preBuf, _ = br.Peek(n) } return preBuf, 0, nil } const connectProcessTimeout = 2 * time.Second // This is invoked for remote LEAF remote connections after processing the INFO // protocol. This will do the TLS handshake (if needed be) func (s *Server) leafNodeResumeConnectProcess(c *client) { clusterName := s.ClusterName() c.mu.Lock() if c.isClosed() { c.mu.Unlock() return } remote := c.leaf.remote var tlsRequired bool // In case of websocket, the TLS handshake has been already done. // So check only for non websocket connections. if !c.isWebsocket() { var tlsConfig *tls.Config var tlsName string var tlsTimeout float64 // Check if TLS is required and gather TLS config variables. tlsRequired, tlsConfig, tlsName, tlsTimeout = c.leafNodeGetTLSConfigForSolicit(remote, true) // If TLS required, peform handshake. if tlsRequired { // Get the URL that was used to connect to the remote server. rURL := remote.getCurrentURL() // Perform the client-side TLS handshake. if resetTLSName, err := c.doTLSClientHandshake("leafnode", rURL, tlsConfig, tlsName, tlsTimeout, c.srv.getOpts().LeafNode.TLSPinnedCerts); err != nil { // Check if we need to reset the remote's TLS name. 
if resetTLSName { remote.Lock() remote.tlsName = _EMPTY_ remote.Unlock() } c.mu.Unlock() return } } } if err := c.sendLeafConnect(clusterName, tlsRequired, c.headers); err != nil { c.mu.Unlock() c.closeConnection(WriteError) return } // Spin up the write loop. s.startGoRoutine(func() { c.writeLoop() }) // timeout leafNodeFinishConnectProcess c.ping.tmr = time.AfterFunc(connectProcessTimeout, func() { c.mu.Lock() // check if leafNodeFinishConnectProcess was called and prevent later leafNodeFinishConnectProcess if !c.flags.setIfNotSet(connectProcessFinished) { c.mu.Unlock() return } clearTimer(&c.ping.tmr) closed := c.isClosed() c.mu.Unlock() if !closed { c.sendErrAndDebug("Stale Leaf Node Connection - Closing") c.closeConnection(StaleConnection) } }) c.mu.Unlock() c.Debugf("Remote leafnode connect msg sent") } // This is invoked for remote LEAF connections after processing the INFO // protocol and leafNodeResumeConnectProcess. // This will send LS+ the CONNECT protocol and register the leaf node. func (s *Server) leafNodeFinishConnectProcess(c *client) { c.mu.Lock() if !c.flags.setIfNotSet(connectProcessFinished) { c.mu.Unlock() return } if c.isClosed() { c.mu.Unlock() s.removeLeafNodeConnection(c) return } remote := c.leaf.remote // Check if we will need to send the system connect event. remote.RLock() sendSysConnectEvent := remote.Hub remote.RUnlock() // Capture account before releasing lock acc := c.acc // cancel connectProcessTimeout clearTimer(&c.ping.tmr) c.mu.Unlock() // Make sure we register with the account here. c.registerWithAccount(acc) s.addLeafNodeConnection(c, _EMPTY_, _EMPTY_, false) s.initLeafNodeSmapAndSendSubs(c) if sendSysConnectEvent { s.sendLeafNodeConnect(acc) } // The above functions are not atomically under the client // lock doing those operations. It is possible - since we // have started the read/write loops - that the connection // is closed before or in between. This would leave the // closed LN connection possible registered with the account // and/or the server's leafs map. So check if connection // is closed, and if so, manually cleanup. c.mu.Lock() closed := c.isClosed() if !closed { s.setFirstPingTimer(c) } c.mu.Unlock() if closed { s.removeLeafNodeConnection(c) if prev := acc.removeClient(c); prev == 1 { s.decActiveAccounts() } } }
1
14,242
Should it not be something more like `MaxAccountConnectionsExceeded` here?
nats-io-nats-server
go
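The review comment above questions whether the generic `ProtocolViolation` close reason in `leafPermViolation` is the right one. Below is a minimal, self-contained Go sketch of the design point being raised - a dedicated close reason makes the disconnect observable for what it is. The `ClosedState` type and both constants here are illustrative stand-ins, not the actual nats-server definitions.

package main

import "fmt"

// ClosedState mimics a close-reason enum. The names are illustrative
// stand-ins, not the real nats-server constants.
type ClosedState int

const (
	ProtocolViolation ClosedState = iota
	MaxAccountConnectionsExceeded
)

func (s ClosedState) String() string {
	switch s {
	case ProtocolViolation:
		return "Protocol Violation"
	case MaxAccountConnectionsExceeded:
		return "Maximum Account Connections Exceeded"
	default:
		return "Unknown"
	}
}

// closeConnection only logs here; the point is that a specific reason
// lets operators and metrics distinguish a limits problem from a
// malformed protocol exchange.
func closeConnection(reason ClosedState) {
	fmt.Printf("closing connection: %s\n", reason)
}

func main() {
	closeConnection(ProtocolViolation)             // generic: hides the cause
	closeConnection(MaxAccountConnectionsExceeded) // specific: self-describing
}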
@@ -180,6 +180,13 @@ func TestTrimComputeEndpoint(t *testing.T) { parseAndValidate(t, "-compute_endpoint_override", " http://endpoint ").ComputeEndpoint) } +func TestTrimComputeServiceAccount(t *testing.T) { + assert.Equal(t, "", + parseAndValidate(t, "-compute_service_account", " ").ComputeServiceAccount) + assert.Equal(t, "default", + parseAndValidate(t, "-compute_service_account", " default ").ComputeServiceAccount) +} + func TestGcsLogsDisabled(t *testing.T) { assert.False(t, parseAndValidate(t, "-disable_gcs_logging=false").GcsLogsDisabled) assert.True(t, parseAndValidate(t, "-disable_gcs_logging=true").GcsLogsDisabled)
1
// Copyright 2020 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package importer import ( "errors" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/daisycommon" ) func TestRequireImageName(t *testing.T) { assert.EqualError(t, expectFailedValidation(t, "-client_id=pantheon"), "-image_name has to be specified") } func TestTrimAndLowerImageName(t *testing.T) { assert.Equal(t, "gcp-is-great", parseAndValidate(t, "-image_name", " GCP-is-GREAT ").ImageName) } func TestTrimFamily(t *testing.T) { assert.Equal(t, "Ubuntu", parseAndValidate(t, "-family", " Ubuntu ").Family) } func TestTrimDescription(t *testing.T) { assert.Equal(t, "Ubuntu", parseAndValidate(t, "-description", " Ubuntu ").Description) } func TestParseLabelsToMap(t *testing.T) { expected := map[string]string{"internal": "true", "private": "false"} assert.Equal(t, expected, parseAndValidate(t, "-labels=internal=true,private=false").Labels) } func TestFailOnLabelSyntaxError(t *testing.T) { assert.Contains(t, expectFailedParse(t, "-labels=internal:true").Error(), "invalid value \"internal:true\" for flag -labels") } func TestPopulateStorageLocationIfMissing(t *testing.T) { args := []string{"-image_name=i", "-client_id=c", "-data_disk"} actual, err := NewImportArguments(args) assert.NoError(t, err) err = actual.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", storageLocation: "us", }, mockSourceFactory{}) assert.NoError(t, err) assert.Equal(t, "us", actual.StorageLocation) } func TestTrimAndLowerStorageLocation(t *testing.T) { assert.Equal(t, "eu", parseAndValidate(t, "-storage_location", " EU ").StorageLocation) } func TestPopulateWorkflowDir(t *testing.T) { assert.Regexp(t, ".*/daisy_workflows", parseAndValidate(t).WorkflowDir) } func TestFailWhenClientIdMissing(t *testing.T) { assert.Contains(t, expectFailedValidation(t).Error(), "-client_id has to be specified") } func TestTrimAndLowerClientId(t *testing.T) { assert.Equal(t, "pantheon", parseAndValidate(t, "-client_id", " Pantheon ").ClientID) } func TestTrimClientVersion(t *testing.T) { assert.Equal(t, "301.0.0B", parseAndValidate(t, "-client_version", " 301.0.0B ").ClientVersion) } func TestTrimProject(t *testing.T) { assert.Equal(t, "TestProject", parseAndValidate(t, "-project", " TestProject ").Project) } func TestPopulateProjectIfMissing(t *testing.T) { args := []string{"-image_name=i", "-client_id=c", "-data_disk"} actual, err := NewImportArguments(args) assert.NoError(t, err) err = actual.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", project: "the-project", }, mockSourceFactory{}) assert.NoError(t, err) assert.Equal(t, "the-project", actual.Project) } func TestTrimNetwork(t *testing.T) { assert.Equal(t, "global/networks/id", parseAndValidate(t, "-network", " id ").Network) } func TestTrimSubnet(t *testing.T) { assert.Equal(t, "regions/us-west2/subnetworks/sub-id", parseAndValidate(t, "-subnet", " sub-id ").Subnet) } 
func TestTrimAndLowerZone(t *testing.T) { assert.Equal(t, "us-central4-a", parseAndValidate(t, "-zone", " us-central4-a ").Zone) } func TestPopulateZoneIfMissing(t *testing.T) { args := []string{"-image_name=i", "-client_id=c", "-data_disk"} actual, err := NewImportArguments(args) assert.NoError(t, err) err = actual.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", }, mockSourceFactory{}) assert.NoError(t, err) assert.Equal(t, "us-west2-a", actual.Zone) } func TestPopulateRegion(t *testing.T) { args := []string{"-image_name=i", "-client_id=c", "-data_disk"} actual, err := NewImportArguments(args) assert.NoError(t, err) err = actual.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", }, mockSourceFactory{}) assert.NoError(t, err) assert.Equal(t, "us-west2", actual.Region) } func TestScratchBucketPath(t *testing.T) { started := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) id := "abc" scratchDir := "gce-image-import-2009-11-10T23:00:00Z-abc" var flagtests = []struct { name string bucketArg string expected string }{ {"no path", "gs://bucket", "gs://bucket/" + scratchDir}, {"empty path", "gs://bucket/", "gs://bucket/" + scratchDir}, {"with path", "gs://bucket/path", "gs://bucket/path/" + scratchDir}, {"trim, no path", " gs://bucket ", "gs://bucket/" + scratchDir}, {"trim, empty path", " gs://bucket/ ", "gs://bucket/" + scratchDir}, {"trim, with path", " gs://bucket/path ", "gs://bucket/path/" + scratchDir}, {"populate when missing", "", "gs://fallback-bucket/" + scratchDir}, } for _, tt := range flagtests { t.Run(tt.name, func(t *testing.T) { args := parse(t, "-scratch_bucket_gcs_path", tt.bucketArg) args.Started = started args.ExecutionID = id err := args.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", scratchBucket: "gs://fallback-bucket/", }, mockSourceFactory{}) assert.NoError(t, err) assert.Equal(t, tt.expected, args.ScratchBucketGcsPath) }) } } func TestTrimOauth(t *testing.T) { assert.Equal(t, "file.json", parseAndValidate(t, "-oauth", " file.json ").Oauth) } func TestTrimComputeEndpoint(t *testing.T) { assert.Equal(t, "http://endpoint", parseAndValidate(t, "-compute_endpoint_override", " http://endpoint ").ComputeEndpoint) } func TestGcsLogsDisabled(t *testing.T) { assert.False(t, parseAndValidate(t, "-disable_gcs_logging=false").GcsLogsDisabled) assert.True(t, parseAndValidate(t, "-disable_gcs_logging=true").GcsLogsDisabled) assert.True(t, parseAndValidate(t, "-disable_gcs_logging").GcsLogsDisabled) } func TestCloudLogsDisabled(t *testing.T) { assert.False(t, parseAndValidate(t, "-disable_cloud_logging=false").CloudLogsDisabled) assert.True(t, parseAndValidate(t, "-disable_cloud_logging=true").CloudLogsDisabled) assert.True(t, parseAndValidate(t, "-disable_cloud_logging").CloudLogsDisabled) } func TestStdoutLogsDisabled(t *testing.T) { assert.False(t, parseAndValidate(t, "-disable_stdout_logging=false").StdoutLogsDisabled) assert.True(t, parseAndValidate(t, "-disable_stdout_logging=true").StdoutLogsDisabled) assert.True(t, parseAndValidate(t, "-disable_stdout_logging").StdoutLogsDisabled) } func TestNoExternalIp(t *testing.T) { assert.False(t, parseAndValidate(t, "-no_external_ip=false").NoExternalIP) assert.True(t, parseAndValidate(t, "-no_external_ip=true").NoExternalIP) assert.True(t, parseAndValidate(t, "-no_external_ip").NoExternalIP) } func TestPopulateNetworkAndSubnet(t *testing.T) { tests := []struct { name string args []string expectedNetwork string expectedSubnet string }{ { name: 
"populate network as default when network and subnet empty", expectedNetwork: "global/networks/default", }, { name: "qualify network when specified", args: []string{"-network", "custom-network"}, expectedNetwork: "global/networks/custom-network", }, { name: "don't populate empty network when subnet is specified", args: []string{"-subnet", "custom-subnet"}, expectedSubnet: "regions/us-west2/subnetworks/custom-subnet", }, { name: "qualify network and subnet when both specified", args: []string{"-subnet", "custom-subnet", "-network", "custom-network"}, expectedNetwork: "global/networks/custom-network", expectedSubnet: "regions/us-west2/subnetworks/custom-subnet", }, { name: "keep pre-qualified URIs", args: []string{ "-subnet", "regions/us-west2/subnetworks/pre-qual-subnet", "-network", "global/networks/pre-qual-network"}, expectedNetwork: "global/networks/pre-qual-network", expectedSubnet: "regions/us-west2/subnetworks/pre-qual-subnet", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { actual := parseAndValidate(t, tt.args...) assert.Equal(t, tt.expectedNetwork, actual.Network) assert.Equal(t, tt.expectedSubnet, actual.Subnet) }) } } func TestTrimSourceFile(t *testing.T) { assert.Equal(t, "gs://bucket/image.vmdk", parseAndValidate( t, "-source_file", " gs://bucket/image.vmdk ").SourceFile) } func TestTrimSourceImage(t *testing.T) { assert.Equal(t, "path/source-image", parseAndValidate( t, "-source_image", " path/source-image ").SourceImage) } func TestSourceObjectFromSourceImage(t *testing.T) { args := []string{"-source_image", "path/source-image", "-image_name=i", "-client_id=c", "-data_disk"} actual, err := NewImportArguments(args) assert.NoError(t, err) err = actual.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", scratchBucket: "gs://custom-bucket/", }, mockSourceFactory{ expectedImage: "path/source-image", t: t, }) assert.NoError(t, err) assert.Equal(t, "path/source-image", actual.SourceImage) assert.Equal(t, "path/source-image", actual.Source.Path()) } func TestSourceObjectFromSourceFile(t *testing.T) { args := []string{"-source_file", "gs://path/file", "-image_name=i", "-client_id=c", "-data_disk"} actual, err := NewImportArguments(args) assert.NoError(t, err) err = actual.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", scratchBucket: "gs://custom-bucket/", }, mockSourceFactory{ expectedFile: "gs://path/file", t: t, }) assert.NoError(t, err) assert.Equal(t, "gs://path/file", actual.SourceFile) assert.Equal(t, "gs://path/file", actual.Source.Path()) } func TestErrorWhenSourceValidationFails(t *testing.T) { args := []string{"-image_name=i", "-client_id=c", "-data_disk"} actual, err := NewImportArguments(args) assert.NoError(t, err) err = actual.ValidateAndPopulate(mockPopulator{ zone: "us-west2-a", region: "us-west2", scratchBucket: "gs://custom-bucket/", }, mockSourceFactory{ t: t, err: errors.New("bad source"), }) assert.Error(t, err) assert.Contains(t, err.Error(), "bad source") } func TestDataDiskSettable(t *testing.T) { assert.False(t, parseAndValidate(t, "-data_disk=false", "-os=ubuntu-1804").DataDisk) assert.False(t, parseAndValidate(t, "-os=ubuntu-1804").DataDisk) assert.True(t, parseAndValidate(t, "-data_disk=true").DataDisk) assert.True(t, parseAndValidate(t, "-data_disk").DataDisk) } func TestTrimAndLowerOS(t *testing.T) { assert.Equal(t, "ubuntu-1804", parseAndValidate(t, "-os", " UBUNTU-1804 ").OS) } func TestFailWhenOSNotRegistered(t *testing.T) { assert.Contains(t, expectFailedValidation(t, 
"-os=android", "-client_id=c", "-image_name=i").Error(), "os `android` is invalid. Allowed values:") } func TestNoGuestEnvironmentSettable(t *testing.T) { assert.False(t, parseAndValidate(t, "-data_disk=false", "-os=ubuntu-1804").DataDisk) assert.False(t, parseAndValidate(t, "-os=ubuntu-1804").DataDisk) assert.True(t, parseAndValidate(t, "-data_disk=true").DataDisk) assert.True(t, parseAndValidate(t, "-data_disk").DataDisk) } func TestBYOLDefaultsToFalse(t *testing.T) { assert.False(t, parseAndValidate(t).BYOL) } func TestBYOLIsSettable(t *testing.T) { assert.True(t, parseAndValidate(t, "-byol").BYOL) } func TestBYOLCanOnlyBeSpecifiedWhenDetectionEnabled(t *testing.T) { expectedError := "when -byol is specified, -data_disk, -os, and -custom_translate_workflow have to be empty" assert.Contains(t, expectFailedValidation(t, "-image_name=i", "-client_id=test", "-data_disk", "-byol").Error(), expectedError) assert.Contains(t, expectFailedValidation(t, "-image_name=i", "-client_id=test", "-os=ubuntu-1804", "-byol").Error(), expectedError) assert.Contains(t, expectFailedValidation(t, "-image_name=i", "-client_id=test", "-custom_translate_workflow=workflow.json", "-byol").Error(), expectedError) } func TestDurationHasDefaultValue(t *testing.T) { assert.Equal(t, time.Hour*2, parseAndValidate(t).Timeout) } func TestDurationIsSettable(t *testing.T) { assert.Equal(t, time.Hour*5, parseAndValidate(t, "-timeout=5h").Timeout) } func TestTrimCustomWorkflow(t *testing.T) { assert.Equal(t, "workflow.json", parseAndValidate(t, "-custom_translate_workflow", " workflow.json ").CustomWorkflow) } func TestValidateForConflictingArguments(t *testing.T) { assert.Contains(t, expectFailedValidation(t, "-data_disk", "-os=ubuntu-1604", "-client_id=c", "-image_name=i").Error(), "when -data_disk is specified, -os and -custom_translate_workflow should be empty") assert.Contains(t, expectFailedValidation(t, "-data_disk", "-custom_translate_workflow=file.json", "-client_id=c", "-image_name=i").Error(), "when -data_disk is specified, -os and -custom_translate_workflow should be empty") assert.Contains(t, expectFailedValidation(t, "-os=ubuntu-1804", "-custom_translate_workflow=file.json", "-client_id=c", "-image_name=i").Error(), "-os and -custom_translate_workflow can't be both specified") } func TestUEFISettable(t *testing.T) { assert.False(t, parseAndValidate(t, "-uefi_compatible=false").UefiCompatible) assert.True(t, parseAndValidate(t, "-uefi_compatible=true").UefiCompatible) assert.True(t, parseAndValidate(t, "-uefi_compatible").UefiCompatible) } func TestSysprepSettable(t *testing.T) { assert.False(t, parseAndValidate(t, "-sysprep_windows=false").SysprepWindows) assert.True(t, parseAndValidate(t, "-sysprep_windows=true").SysprepWindows) assert.True(t, parseAndValidate(t, "-sysprep_windows").SysprepWindows) } func TestImportArguments_DaisyAttrs(t *testing.T) { args := ImportArguments{ Project: "panda", Zone: "us-west", ScratchBucketGcsPath: "gs://bucket/path", Oauth: "oauth-info", Timeout: time.Hour * 3, ComputeEndpoint: "endpoint-uri", GcsLogsDisabled: true, CloudLogsDisabled: true, StdoutLogsDisabled: true, NoExternalIP: true, } expected := daisycommon.WorkflowAttributes{ Project: "panda", Zone: "us-west", GCSPath: "gs://bucket/path", OAuth: "oauth-info", Timeout: "3h0m0s", ComputeEndpoint: "endpoint-uri", DisableGCSLogs: true, DisableCloudLogs: true, DisableStdoutLogs: true, NoExternalIP: true, } assert.Equal(t, expected, args.DaisyAttrs()) } type mockPopulator struct { project string zone string region string 
scratchBucket string
	storageLocation string
	err             error
}

func (m mockPopulator) PopulateMissingParameters(project *string, client string, zone *string, region *string,
	scratchBucketGcsPath *string, file string, storageLocation *string) error {
	if m.err != nil {
		return m.err
	}
	if *project == "" {
		*project = m.project
	}
	if *zone == "" {
		*zone = m.zone
	}
	if *region == "" {
		*region = m.region
	}
	if *scratchBucketGcsPath == "" {
		*scratchBucketGcsPath = m.scratchBucket
	}
	if *storageLocation == "" {
		*storageLocation = m.storageLocation
	}
	return nil
}

type mockSource struct {
	sourcePath string
}

func (m mockSource) Path() string {
	return m.sourcePath
}

type mockSourceFactory struct {
	err                         error
	expectedFile, expectedImage string
	t                           *testing.T
}

func (m mockSourceFactory) Init(sourceFile, sourceImage string) (Source, error) {
	// Skip parameter verification unless expected values were provided when the mock was set up.
	if m.expectedFile != "" {
		assert.Equal(m.t, m.expectedFile, sourceFile)
		return mockSource{sourcePath: sourceFile}, m.err
	}
	if m.expectedImage != "" {
		assert.Equal(m.t, m.expectedImage, sourceImage)
		return mockSource{sourcePath: sourceImage}, m.err
	}
	return mockSource{}, m.err
}

func parseAndValidate(t *testing.T, args ...string) ImportArguments {
	actual := parse(t, args...)
	err := actual.ValidateAndPopulate(mockPopulator{
		zone:   "us-west2-a",
		region: "us-west2",
	}, mockSourceFactory{})
	assert.NoError(t, err)
	return actual
}

func parse(t *testing.T, args ...string) ImportArguments {
	var hasClientID, hasImageName bool
	for _, arg := range args {
		if strings.HasPrefix(arg, "-client_id") {
			hasClientID = true
		} else if strings.HasPrefix(arg, "-image_name") {
			hasImageName = true
		}
	}
	if !hasClientID {
		args = append(args, "-client_id=pantheon")
	}
	if !hasImageName {
		args = append(args, "-image_name=name")
	}
	actual, err := NewImportArguments(args)
	assert.NoError(t, err)
	return actual
}

func expectFailedParse(t *testing.T, args ...string) error {
	_, err := NewImportArguments(args)
	assert.Error(t, err)
	return err
}

func expectFailedValidation(t *testing.T, args ...string) error {
	actual, err := NewImportArguments(args)
	assert.NoError(t, err)
	err = actual.ValidateAndPopulate(mockPopulator{
		zone:   "us-west2-a",
		region: "us-west2",
	}, mockSourceFactory{})
	assert.Error(t, err)
	return err
}
1
12,926
minor: use a non-default CE service account for testing (since the default is handled in a specific way in the code)
GoogleCloudPlatform-compute-image-tools
go
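A sketch of the reviewer's suggestion above: exercise `-compute_service_account` with a non-default account so the test does not only hit the special-cased `default` path. It relies on the `parseAndValidate` helper and `ComputeServiceAccount` field from the record above; the account name itself is made up.

package importer

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestTrimComputeServiceAccountNonDefault is hypothetical: the account
// name is invented, and the test depends on the parseAndValidate helper
// defined in the test file shown above.
func TestTrimComputeServiceAccountNonDefault(t *testing.T) {
	assert.Equal(t, "sa@test-project.iam.gserviceaccount.com",
		parseAndValidate(t, "-compute_service_account",
			" sa@test-project.iam.gserviceaccount.com ").ComputeServiceAccount)
}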
@@ -84,6 +84,10 @@ func TestBootstrapWindowedPoSt(t *testing.T) { wd, _ := os.Getwd() genCfgPath := filepath.Join(wd, "..", "fixtures/setup.json") presealPath := filepath.Join(wd, "..", "fixtures/genesis-sectors") + // setup presealed sectors and uncomment to run test against sectors with larger sector size + //genCfgPath := filepath.Join("./512", "setup.json") + //presealPath := "./512" + genTime := int64(1000000000) blockTime := 1 * time.Second fakeClock := clock.NewFake(time.Unix(genTime, 0))
1
package functional import ( "context" "os" "path/filepath" "testing" "time" "github.com/filecoin-project/go-address" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-filecoin/build/project" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/internal/pkg/block" "github.com/filecoin-project/go-filecoin/internal/pkg/clock" "github.com/filecoin-project/go-filecoin/internal/pkg/constants" "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/iptbtester" tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" ) func TestBootstrapMineOnce(t *testing.T) { tf.FunctionalTest(t) ctx := context.Background() root := project.Root() tns, err := iptbtester.NewTestNodes(t, 1, nil) require.NoError(t, err) node0 := tns[0] // Setup first node, note: Testbed.Name() is the directory genConfigPath := filepath.Join(root, "fixtures/setup.json") genesis := iptbtester.RequireGenesisFromSetup(t, node0.Testbed.Name(), genConfigPath) genesis.SectorsDir = filepath.Join(node0.Dir(), "sectors") genesis.PresealedSectorDir = filepath.Join(root, "./fixtures/genesis-sectors") node0.MustInitWithGenesis(ctx, genesis) node0.MustStart(ctx) defer node0.MustStop(ctx) var minerAddress address.Address node0.MustRunCmdJSON(ctx, &minerAddress, "go-filecoin", "config", "mining.minerAddress") // Check the miner's initial power corresponds to 2 2kb sectors var status porcelain.MinerStatus node0.MustRunCmdJSON(ctx, &status, "go-filecoin", "miner", "status", minerAddress.String()) // expected miner power is 2 2kib sectors expectedMinerPower := constants.DevSectorSize * 2 actualMinerPower := status.Power.Uint64() assert.Equal(t, uint64(expectedMinerPower), status.Power.Uint64(), "expected miner power: %d actual miner power: %d", expectedMinerPower, actualMinerPower) // Assert that the chain head is genesis block var blocks []block.Block node0.MustRunCmdJSON(ctx, &blocks, "go-filecoin", "chain", "ls") require.Equal(t, 1, len(blocks)) assert.Equal(t, abi.ChainEpoch(0), blocks[0].Height) assert.Equal(t, big.Zero(), blocks[0].ParentWeight) assert.True(t, blocks[0].Parents.Equals(block.NewTipSetKey())) assert.Equal(t, builtin.SystemActorAddr, blocks[0].Miner) // Mine once node0.MustRunCmd(ctx, "go-filecoin", "mining", "once") // Assert that the chain head has now been mined by the miner node0.MustRunCmdJSON(ctx, &blocks, "go-filecoin", "chain", "ls") require.Equal(t, 1, len(blocks)) assert.Equal(t, minerAddress, blocks[0].Miner) } func TestBootstrapWindowedPoSt(t *testing.T) { tf.FunctionalTest(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() wd, _ := os.Getwd() genCfgPath := filepath.Join(wd, "..", "fixtures/setup.json") presealPath := filepath.Join(wd, "..", "fixtures/genesis-sectors") genTime := int64(1000000000) blockTime := 1 * time.Second fakeClock := clock.NewFake(time.Unix(genTime, 0)) // Load genesis config fixture. 
genCfg := loadGenesisConfig(t, genCfgPath)
	// set proving period start to something soon
	start := abi.ChainEpoch(1)
	genCfg.Miners[0].ProvingPeriodStart = &start

	seed := node.MakeChainSeed(t, genCfg)
	chainClock := clock.NewChainClockFromClock(uint64(genTime), blockTime, fakeClock)

	miner := makeNode(ctx, t, seed, chainClock)
	_, _, err := initNodeGenesisMiner(t, miner, seed, genCfg.Miners[0].Owner, presealPath, genCfg.Miners[0].SectorSize)
	require.NoError(t, err)

	err = miner.Start(ctx)
	require.NoError(t, err)

	err = miner.StorageMining.Start(ctx)
	require.NoError(t, err)

	maddr, err := miner.BlockMining.BlockMiningAPI.MinerAddress()
	require.NoError(t, err)

	status, err := miner.PorcelainAPI.MinerGetStatus(ctx, maddr, requireChainHead(t, miner))
	require.NoError(t, err)
	require.Equal(t, abi.ChainEpoch(1), status.ProvingPeriodStart)

	// mine once to enter proving period
	fakeClock.Advance(blockTime)
	_, err = miner.BlockMining.BlockMiningAPI.MiningOnce(ctx)
	require.NoError(t, err)

	// PoSt should have been triggered; simulate mining while waiting for an update to proving period start
	for i := 0; i < 25; i++ {
		fakeClock.Advance(blockTime)
		_, err := miner.BlockMining.BlockMiningAPI.MiningOnce(ctx)
		require.NoError(t, err)

		status, err := miner.PorcelainAPI.MinerGetStatus(ctx, maddr, requireChainHead(t, miner))
		require.NoError(t, err)
		if status.ProvingPeriodStart > 1 {
			return
		}

		// If we mine too many blocks before the post is sent we could miss our window. Add some friction here.
		time.Sleep(2 * time.Second)
	}
	t.Fatal("Timeout waiting for windowed PoSt")
}
1
23,471
This is how I ran the 512MiB test. It's probably too much data for a fixture, and makes for longer functional tests (although still less than a minute on my laptop).
filecoin-project-venus
go
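The patch above switches to the larger 512MiB fixtures by commenting code in and out. One hedged alternative, sketched below, is to resolve the fixture paths through an environment variable so rerunning the test against bigger sectors needs no source edit. `VENUS_PRESEAL_DIR` is an invented name, not an existing project convention.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// fixturePaths picks the genesis config and preseal directory,
// preferring an override from the environment. VENUS_PRESEAL_DIR is a
// hypothetical variable name used only for this sketch.
func fixturePaths() (genCfgPath, presealPath string) {
	if dir := os.Getenv("VENUS_PRESEAL_DIR"); dir != "" {
		return filepath.Join(dir, "setup.json"), dir
	}
	// Fall back to the small default fixtures used by the test above.
	wd, _ := os.Getwd()
	return filepath.Join(wd, "..", "fixtures/setup.json"),
		filepath.Join(wd, "..", "fixtures/genesis-sectors")
}

func main() {
	genCfg, preseal := fixturePaths()
	fmt.Println("genesis config:", genCfg)
	fmt.Println("preseal dir:", preseal)
}

Running with, say, VENUS_PRESEAL_DIR=./512 would reproduce what the commented-out lines in the patch do, without touching the test source.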
@@ -90,7 +90,9 @@ module Beaker # @return nil # @api public def resolve_symlinks! - @options[:hosts_file] = File.realpath(@options[:hosts_file]) if @options[:hosts_file] + if @options[:hosts_file] && !@options[:hosts_file_generated] + @options[:hosts_file] = File.realpath(@options[:hosts_file]) + end end #Converts array of paths into array of fully qualified git repo URLS with expanded keywords
1
require 'yaml' module Beaker module Options #An Object that parses, merges and normalizes all supported Beaker options and arguments class Parser GITREPO = 'git://github.com/puppetlabs' #These options can have the form of arg1,arg2 or [arg] or just arg, #should default to [] LONG_OPTS = [:helper, :load_path, :tests, :pre_suite, :post_suite, :install, :pre_cleanup, :modules] #These options expand out into an array of .rb files RB_FILE_OPTS = [:tests, :pre_suite, :post_suite, :pre_cleanup] PARSE_ERROR = Psych::SyntaxError #The OptionsHash of all parsed options attr_accessor :options # Returns the git repository used for git installations # @return [String] The git repository def repo GITREPO end # Returns a description of Beaker's supported arguments # @return [String] The usage String def usage @command_line_parser.usage end # Normalizes argument into an Array. Argument can either be converted into an array of a single value, # or can become an array of multiple values by splitting arg over ','. If argument is already an # array that array is returned untouched. # @example # split_arg([1, 2, 3]) == [1, 2, 3] # split_arg(1) == [1] # split_arg("1,2") == ["1", "2"] # split_arg(nil) == [] # @param [Array, String] arg Either an array or a string to be split into an array # @return [Array] An array of the form arg, [arg], or arg.split(',') def split_arg arg arry = [] if arg.is_a?(Array) arry += arg elsif arg =~ /,/ arry += arg.split(',') else arry << arg end arry end # Generates a list of files based upon a given path or list of paths. # # Looks recursively for .rb files in paths. # # @param [Array] paths Array of file paths to search for .rb files # @return [Array] An Array of fully qualified paths to .rb files # @raise [ArgumentError] Raises if no .rb files are found in searched directory or if # no .rb files are found overall def file_list(paths) files = [] if !paths.empty? paths.each do |root| @validator.validate_path(root) path_files = [] if File.file?(root) path_files << root elsif File.directory?(root) #expand and explore path_files = Dir.glob(File.join(root, '**/*.rb')) .select { |f| File.file?(f) } .sort_by { |file| [file.count('/'), file] } end @validator.validate_files(path_files, root) files += path_files end end @validator.validate_files(files, paths.to_s) files end # resolves all file symlinks that require it. This modifies @options. # # @note doing it here allows us to not need duplicate logic, which we # would need if we were doing it in the parser (--hosts & --config) # # @return nil # @api public def resolve_symlinks! @options[:hosts_file] = File.realpath(@options[:hosts_file]) if @options[:hosts_file] end #Converts array of paths into array of fully qualified git repo URLS with expanded keywords # #Supports the following keywords # PUPPET # FACTER # HIERA # HIERA-PUPPET #@example # opts = ["PUPPET/3.1"] # parse_git_repos(opts) == ["#{GITREPO}/puppet.git#3.1"] #@param [Array] git_opts An array of paths #@return [Array] An array of fully qualified git repo URLs with expanded keywords def parse_git_repos(git_opts) git_opts.map! { |opt| case opt when /^PUPPET\// opt = "#{repo}/puppet.git##{opt.split('/', 2)[1]}" when /^FACTER\// opt = "#{repo}/facter.git##{opt.split('/', 2)[1]}" when /^HIERA\// opt = "#{repo}/hiera.git##{opt.split('/', 2)[1]}" when /^HIERA-PUPPET\// opt = "#{repo}/hiera-puppet.git##{opt.split('/', 2)[1]}" end opt } git_opts end #Add the 'default' role to the host determined to be the default. If a host already has the role default then #do nothing. 
If more than a single host has the role 'default', raise error. #Default host determined to be 1) the only host in a single host configuration, 2) the host with the role 'master' #defined. #@param [Hash] hosts A hash of hosts, each identified by a String name. Each named host will have an Array of roles def set_default_host!(hosts) default = [] master = [] default_host_name = nil #look through the hosts and find any hosts with role 'default' and any hosts with role 'master' hosts.each_key do |name| host = hosts[name] if host[:roles].include?('default') default << name elsif host[:roles].include?('master') master << name end end # default_set? will throw an error if length > 1 # and return false if no default is set. if [email protected]_set?(default) #no default set, let's make one if not master.empty? and master.length == 1 default_host_name = master[0] elsif hosts.length == 1 default_host_name = hosts.keys[0] end if default_host_name hosts[default_host_name][:roles] << 'default' end end end #Constructor for Parser # def initialize @command_line_parser = Beaker::Options::CommandLineParser.new @presets = Beaker::Options::Presets.new @validator = Beaker::Options::Validator.new end # Parses ARGV or provided arguments array, file options, hosts options and combines with environment variables and # preset defaults to generate a Hash representing the Beaker options for a given test run # # Order of priority is as follows: # 1. environment variables are given top priority # 2. ARGV or provided arguments array # 3. the 'CONFIG' section of the hosts file # 4. options file values # 5. default or preset values are given the lowest priority # # @param [Array] args ARGV or a provided arguments array # @raise [ArgumentError] Raises error on bad input def parse_args(args = ARGV) @options = @presets.presets cmd_line_options = @command_line_parser.parse(args) cmd_line_options[:command_line] = ([$0] + args).join(' ') file_options = Beaker::Options::OptionsFileParser.parse_options_file(cmd_line_options[:options_file]) # merge together command line and file_options # overwrite file options with command line options cmd_line_and_file_options = file_options.merge(cmd_line_options) # merge command line and file options with defaults # overwrite defaults with command line and file options @options = @options.merge(cmd_line_and_file_options) if not @options[:help] and not @options[:beaker_version_print] #read the hosts file that contains the node configuration and hypervisor info hosts_options = Beaker::Options::HostsFileParser.parse_hosts_file(@options[:hosts_file]) # merge in host file vars # overwrite options (default, file options, command line) with host file options @options = @options.merge(hosts_options) # re-merge the command line options # overwrite options (default, file options, hosts file ) with command line arguments @options = @options.merge(cmd_line_options) # merge in env vars # overwrite options (default, file options, command line, hosts file) with env env_vars = @presets.env_vars @options = @options.merge(env_vars) normalize_args end @options end #Validate all merged options values for correctness # #Currently checks: # - each host has a valid platform # - if a keyfile is provided then use it # - paths provided to --test, --pre-suite, --post-suite provided lists of .rb files for testing # - --fail-mode is one of 'fast', 'stop' or nil # - if using blimpy hypervisor an EC2 YAML file exists # - if using the aix, solaris, or vcloud hypervisors a .fog file exists # - that one and only one master is 
defined per set of hosts # - that solaris/windows/aix hosts are agent only for PE tests OR # - sets the default host based upon machine definitions # - if an ssh user has been defined make it the host user # #@raise [ArgumentError] Raise if argument/options values are invalid def normalize_args @options['HOSTS'].each_key do |name| @validator.validate_platform(@options['HOSTS'][name], name) @options['HOSTS'][name]['platform'] = Platform.new(@options['HOSTS'][name]['platform']) end #use the keyfile if present if @options.has_key?(:keyfile) @options[:ssh][:keys] = [@options[:keyfile]] end #split out arguments - these arguments can have the form of arg1,arg2 or [arg] or just arg #will end up being normalized into an array LONG_OPTS.each do |opt| if @options.has_key?(opt) @options[opt] = split_arg(@options[opt]) if RB_FILE_OPTS.include?(opt) && (not @options[opt] == []) @options[opt] = file_list(@options[opt]) end if opt == :install @options[:install] = parse_git_repos(@options[:install]) end else @options[opt] = [] end end @validator.validate_fail_mode(@options[:fail_mode]) @validator.validate_preserve_hosts(@options[:preserve_hosts]) #check for config files necessary for different hypervisors hypervisors = get_hypervisors(@options[:HOSTS]) hypervisors.each do |visor| check_hypervisor_config(visor) end #check that roles of hosts make sense # - must be one and only one master master = 0 roles = get_roles(@options[:HOSTS]) roles.each do |role_array| master += 1 if role_array.include?('master') @validator.validate_frictionless_roles(role_array) end @validator.validate_master_count(master) #check that windows/el-4 boxes are only agents (solaris can be a master in foss cases) @options[:HOSTS].each_key do |name| host = @options[:HOSTS][name] if host[:platform] =~ /windows|el-4/ test_host_roles(name, host) end #check to see if a custom user account has been provided, if so use it if host[:ssh] && host[:ssh][:user] host[:user] = host[:ssh][:user] end # merge host tags for this host with the global/preset host tags host[:host_tags] = @options[:host_tags].merge(host[:host_tags] || {}) end normalize_tags! @validator.validate_tags(@options[:tag_includes], @options[:tag_excludes]) resolve_symlinks! #set the default role set_default_host!(@options[:HOSTS]) end # Get an array containing lists of roles by parsing each host in hosts. # # @param [Array<Array<String>>] hosts beaker hosts # @return [Array] roles [['master', 'database'], ['agent'], ...] def get_roles(hosts) roles = [] hosts.each_key do |name| roles << hosts[name][:roles] end roles end # Get a unique list of hypervisors from list of host. # # @param [Array] hosts beaker hosts # @return [Array] unique list of hypervisors def get_hypervisors(hosts) hypervisors = [] hosts.each_key { |name| hypervisors << hosts[name][:hypervisor].to_s } hypervisors.uniq end # Validate the config file for visor exists. # # @param [String] visor Hypervisor name # @return [nil] no return # @raise [ArgumentError] Raises error if config file does not exist or is not valid YAML def check_hypervisor_config(visor) if ['blimpy'].include?(visor) @validator.check_yaml_file(@options[:ec2_yaml], "required by #{visor}") end if %w(aix solaris vcloud).include?(visor) @validator.check_yaml_file(@options[:dot_fog], "required by #{visor}") end end # Normalize include and exclude tags. This modifies @options. # def normalize_tags! 
@options[:tag_includes] ||= '' @options[:tag_excludes] ||= '' @options[:tag_includes] = @options[:tag_includes].split(',') if @options[:tag_includes].respond_to?(:split) @options[:tag_excludes] = @options[:tag_excludes].split(',') if @options[:tag_excludes].respond_to?(:split) @options[:tag_includes].map!(&:downcase) @options[:tag_excludes].map!(&:downcase) end private # @api private def test_host_roles(host_name, host_hash) exclude_roles = %w(master database dashboard) host_roles = host_hash[:roles] unless (host_roles & exclude_roles).empty? @validator.parser_error "#{host_hash[:platform].to_s} box '#{host_name}' may not have roles: #{exclude_roles.join(', ')}." end end end end end
1
12,828
So we weren't able to hide the fact that the file wasn't pre-generated and had to introduce this "option" to detect that?
voxpupuli-beaker
rb
@@ -20,16 +20,11 @@ package http -import ( - "fmt" - - "go.uber.org/yarpc/api/yarpcerrors" -) - -// TODO: Should we expose the maps as public variables to document the mappings? +import "go.uber.org/yarpc/api/yarpcerrors" var ( - _codeToHTTPStatusCode = map[yarpcerrors.Code]int{ + // CodeToStatusCode maps all Codes to their corresponding HTTP status code. + CodeToStatusCode = map[yarpcerrors.Code]int{ yarpcerrors.CodeOK: 200, yarpcerrors.CodeCancelled: 499, yarpcerrors.CodeUnknown: 500,
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package http import ( "fmt" "go.uber.org/yarpc/api/yarpcerrors" ) // TODO: Should we expose the maps as public variables to document the mappings? var ( _codeToHTTPStatusCode = map[yarpcerrors.Code]int{ yarpcerrors.CodeOK: 200, yarpcerrors.CodeCancelled: 499, yarpcerrors.CodeUnknown: 500, yarpcerrors.CodeInvalidArgument: 400, yarpcerrors.CodeDeadlineExceeded: 504, yarpcerrors.CodeNotFound: 404, yarpcerrors.CodeAlreadyExists: 409, yarpcerrors.CodePermissionDenied: 403, yarpcerrors.CodeResourceExhausted: 429, yarpcerrors.CodeFailedPrecondition: 400, yarpcerrors.CodeAborted: 409, yarpcerrors.CodeOutOfRange: 400, yarpcerrors.CodeUnimplemented: 501, yarpcerrors.CodeInternal: 500, yarpcerrors.CodeUnavailable: 503, yarpcerrors.CodeDataLoss: 500, yarpcerrors.CodeUnauthenticated: 401, } _httpStatusCodeToCodes = map[int][]yarpcerrors.Code{ 200: {yarpcerrors.CodeOK}, 400: { yarpcerrors.CodeInvalidArgument, yarpcerrors.CodeFailedPrecondition, yarpcerrors.CodeOutOfRange, }, 401: {yarpcerrors.CodeUnauthenticated}, 403: {yarpcerrors.CodePermissionDenied}, 404: {yarpcerrors.CodeNotFound}, 409: { yarpcerrors.CodeAborted, yarpcerrors.CodeAlreadyExists, }, 429: {yarpcerrors.CodeResourceExhausted}, 499: {yarpcerrors.CodeCancelled}, 500: { yarpcerrors.CodeUnknown, yarpcerrors.CodeInternal, yarpcerrors.CodeDataLoss, }, 501: {yarpcerrors.CodeUnimplemented}, 503: {yarpcerrors.CodeUnavailable}, 504: {yarpcerrors.CodeDeadlineExceeded}, } ) // codeToHTTPStatusCode returns the HTTP status code for the given Code, // or error if the Code is unknown. func codeToHTTPStatusCode(code yarpcerrors.Code) (int, error) { statusCode, ok := _codeToHTTPStatusCode[code] if !ok { return 0, fmt.Errorf("unknown code: %v", code) } return statusCode, nil } // TODO: Is there any use to this? The original thinking was that it would be nice // to have a function that returns the most "general" yarpcerrors.Code for the given HTTP // status code, but this doesn't really work in practice. // httpStatusCodeToCodes returns the Codes that correspond to the given HTTP status // code, or nil if no Codes correspond to the given HTTP status code. func httpStatusCodeToCodes(httpStatusCode int) []yarpcerrors.Code { codes, ok := _httpStatusCodeToCodes[httpStatusCode] if !ok { return nil } c := make([]yarpcerrors.Code, len(codes)) copy(c, codes) return c }
1
14,553
Agree that exporting these maps is the best tradeoff, just registering my ongoing discontent with the lack of `const` collections.
yarpc-yarpc-go
go
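On the reviewer's point above about Go lacking `const` collections: an exported map can be mutated by any importer. A common mitigation, sketched here with a stand-in mapping rather than the actual yarpc tables, keeps the map unexported and exposes a lookup plus a defensive copy.

package main

import "fmt"

// codeToStatus stays unexported so callers cannot mutate it directly.
var codeToStatus = map[int]int{
	0: 200, // OK
	5: 404, // NotFound
}

// StatusCode exposes read-only lookups into the mapping.
func StatusCode(code int) (int, bool) {
	status, ok := codeToStatus[code]
	return status, ok
}

// StatusCodes returns a defensive copy for callers that want the whole
// mapping, e.g. to document it.
func StatusCodes() map[int]int {
	out := make(map[int]int, len(codeToStatus))
	for k, v := range codeToStatus {
		out[k] = v
	}
	return out
}

func main() {
	if status, ok := StatusCode(5); ok {
		fmt.Println(status) // 404
	}
}

The tradeoff is discoverability: godoc shows the accessor functions but not the mapping itself, which is why exporting the map, as the patch does, can still be the better documentation choice.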
@@ -351,6 +351,15 @@ class WebKitCaret(browsertab.AbstractCaret): def selection(self, callback): callback(self._widget.selectedText()) + def reverse_selection(self): + self._tab.run_js_async("""{ + let sel = window.getSelection(); + sel.setBaseAndExtent( + sel.extentNode, sel.extentOffset, sel.baseNode, + sel.baseOffset + ); + }""") + def _follow_selected(self, *, tab=False): if QWebSettings.globalSettings().testAttribute( QWebSettings.JavascriptEnabled):
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016-2018 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Wrapper over our (QtWebKit) WebView.""" import re import functools import xml.etree.ElementTree from PyQt5.QtCore import pyqtSlot, Qt, QUrl, QPoint, QTimer, QSizeF, QSize from PyQt5.QtGui import QIcon from PyQt5.QtWebKitWidgets import QWebPage, QWebFrame from PyQt5.QtWebKit import QWebSettings from PyQt5.QtPrintSupport import QPrinter from qutebrowser.browser import browsertab, shared from qutebrowser.browser.webkit import (webview, tabhistory, webkitelem, webkitsettings) from qutebrowser.utils import qtutils, usertypes, utils, log, debug from qutebrowser.qt import sip class WebKitAction(browsertab.AbstractAction): """QtWebKit implementations related to web actions.""" action_class = QWebPage action_base = QWebPage.WebAction def exit_fullscreen(self): raise browsertab.UnsupportedOperationError def save_page(self): """Save the current page.""" raise browsertab.UnsupportedOperationError def show_source(self, pygments=False): self._show_source_pygments() class WebKitPrinting(browsertab.AbstractPrinting): """QtWebKit implementations related to printing.""" def check_pdf_support(self): pass def check_printer_support(self): pass def check_preview_support(self): pass def to_pdf(self, filename): printer = QPrinter() printer.setOutputFileName(filename) self.to_printer(printer) def to_printer(self, printer, callback=None): self._widget.print(printer) # Can't find out whether there was an error... if callback is not None: callback(True) class WebKitSearch(browsertab.AbstractSearch): """QtWebKit implementations related to searching on the page.""" def __init__(self, tab, parent=None): super().__init__(tab, parent) self._flags = QWebPage.FindFlags(0) def _call_cb(self, callback, found, text, flags, caller): """Call the given callback if it's non-None. Delays the call via a QTimer so the website is re-rendered in between. Args: callback: What to call found: If the text was found text: The text searched for flags: The flags searched with caller: Name of the caller. 
""" found_text = 'found' if found else "didn't find" # Removing FindWrapsAroundDocument to get the same logging as with # QtWebEngine debug_flags = debug.qflags_key( QWebPage, flags & ~QWebPage.FindWrapsAroundDocument, klass=QWebPage.FindFlag) if debug_flags != '0x0000': flag_text = 'with flags {}'.format(debug_flags) else: flag_text = '' log.webview.debug(' '.join([caller, found_text, text, flag_text]) .strip()) if callback is not None: QTimer.singleShot(0, functools.partial(callback, found)) self.finished.emit(found) def clear(self): if self.search_displayed: self.cleared.emit() self.search_displayed = False # We first clear the marked text, then the highlights self._widget.findText('') self._widget.findText('', QWebPage.HighlightAllOccurrences) def search(self, text, *, ignore_case=usertypes.IgnoreCase.never, reverse=False, result_cb=None): # Don't go to next entry on duplicate search if self.text == text and self.search_displayed: log.webview.debug("Ignoring duplicate search request" " for {}".format(text)) return # Clear old search results, this is done automatically on QtWebEngine. self.clear() self.text = text self.search_displayed = True self._flags = QWebPage.FindWrapsAroundDocument if self._is_case_sensitive(ignore_case): self._flags |= QWebPage.FindCaseSensitively if reverse: self._flags |= QWebPage.FindBackward # We actually search *twice* - once to highlight everything, then again # to get a mark so we can navigate. found = self._widget.findText(text, self._flags) self._widget.findText(text, self._flags | QWebPage.HighlightAllOccurrences) self._call_cb(result_cb, found, text, self._flags, 'search') def next_result(self, *, result_cb=None): self.search_displayed = True found = self._widget.findText(self.text, self._flags) self._call_cb(result_cb, found, self.text, self._flags, 'next_result') def prev_result(self, *, result_cb=None): self.search_displayed = True # The int() here makes sure we get a copy of the flags. flags = QWebPage.FindFlags(int(self._flags)) if flags & QWebPage.FindBackward: flags &= ~QWebPage.FindBackward else: flags |= QWebPage.FindBackward found = self._widget.findText(self.text, flags) self._call_cb(result_cb, found, self.text, flags, 'prev_result') class WebKitCaret(browsertab.AbstractCaret): """QtWebKit implementations related to moving the cursor/selection.""" @pyqtSlot(usertypes.KeyMode) def _on_mode_entered(self, mode): if mode != usertypes.KeyMode.caret: return self.selection_enabled = self._widget.hasSelection() self.selection_toggled.emit(self.selection_enabled) settings = self._widget.settings() settings.setAttribute(QWebSettings.CaretBrowsingEnabled, True) if self._widget.isVisible(): # Sometimes the caret isn't immediately visible, but unfocusing # and refocusing it fixes that. self._widget.clearFocus() self._widget.setFocus(Qt.OtherFocusReason) # Move the caret to the first element in the viewport if there # isn't any text which is already selected. # # Note: We can't use hasSelection() here, as that's always # true in caret mode. 
        if not self.selection_enabled:
            self._widget.page().currentFrame().evaluateJavaScript(
                utils.read_file('javascript/position_caret.js'))

    @pyqtSlot(usertypes.KeyMode)
    def _on_mode_left(self, _mode):
        settings = self._widget.settings()
        if settings.testAttribute(QWebSettings.CaretBrowsingEnabled):
            if self.selection_enabled and self._widget.hasSelection():
                # Remove selection if it exists
                self._widget.triggerPageAction(QWebPage.MoveToNextChar)
            settings.setAttribute(QWebSettings.CaretBrowsingEnabled, False)
            self.selection_enabled = False

    def move_to_next_line(self, count=1):
        if not self.selection_enabled:
            act = QWebPage.MoveToNextLine
        else:
            act = QWebPage.SelectNextLine
        for _ in range(count):
            self._widget.triggerPageAction(act)

    def move_to_prev_line(self, count=1):
        if not self.selection_enabled:
            act = QWebPage.MoveToPreviousLine
        else:
            act = QWebPage.SelectPreviousLine
        for _ in range(count):
            self._widget.triggerPageAction(act)

    def move_to_next_char(self, count=1):
        if not self.selection_enabled:
            act = QWebPage.MoveToNextChar
        else:
            act = QWebPage.SelectNextChar
        for _ in range(count):
            self._widget.triggerPageAction(act)

    def move_to_prev_char(self, count=1):
        if not self.selection_enabled:
            act = QWebPage.MoveToPreviousChar
        else:
            act = QWebPage.SelectPreviousChar
        for _ in range(count):
            self._widget.triggerPageAction(act)

    def move_to_end_of_word(self, count=1):
        if not self.selection_enabled:
            act = [QWebPage.MoveToNextWord]
            if utils.is_windows:  # pragma: no cover
                act.append(QWebPage.MoveToPreviousChar)
        else:
            act = [QWebPage.SelectNextWord]
            if utils.is_windows:  # pragma: no cover
                act.append(QWebPage.SelectPreviousChar)
        for _ in range(count):
            for a in act:
                self._widget.triggerPageAction(a)

    def move_to_next_word(self, count=1):
        if not self.selection_enabled:
            act = [QWebPage.MoveToNextWord]
            if not utils.is_windows:  # pragma: no branch
                act.append(QWebPage.MoveToNextChar)
        else:
            act = [QWebPage.SelectNextWord]
            if not utils.is_windows:  # pragma: no branch
                act.append(QWebPage.SelectNextChar)
        for _ in range(count):
            for a in act:
                self._widget.triggerPageAction(a)

    def move_to_prev_word(self, count=1):
        if not self.selection_enabled:
            act = QWebPage.MoveToPreviousWord
        else:
            act = QWebPage.SelectPreviousWord
        for _ in range(count):
            self._widget.triggerPageAction(act)

    def move_to_start_of_line(self):
        if not self.selection_enabled:
            act = QWebPage.MoveToStartOfLine
        else:
            act = QWebPage.SelectStartOfLine
        self._widget.triggerPageAction(act)

    def move_to_end_of_line(self):
        if not self.selection_enabled:
            act = QWebPage.MoveToEndOfLine
        else:
            act = QWebPage.SelectEndOfLine
        self._widget.triggerPageAction(act)

    def move_to_start_of_next_block(self, count=1):
        if not self.selection_enabled:
            act = [QWebPage.MoveToNextLine, QWebPage.MoveToStartOfBlock]
        else:
            act = [QWebPage.SelectNextLine, QWebPage.SelectStartOfBlock]
        for _ in range(count):
            for a in act:
                self._widget.triggerPageAction(a)

    def move_to_start_of_prev_block(self, count=1):
        if not self.selection_enabled:
            act = [QWebPage.MoveToPreviousLine, QWebPage.MoveToStartOfBlock]
        else:
            act = [QWebPage.SelectPreviousLine, QWebPage.SelectStartOfBlock]
        for _ in range(count):
            for a in act:
                self._widget.triggerPageAction(a)

    def move_to_end_of_next_block(self, count=1):
        if not self.selection_enabled:
            act = [QWebPage.MoveToNextLine, QWebPage.MoveToEndOfBlock]
        else:
            act = [QWebPage.SelectNextLine, QWebPage.SelectEndOfBlock]
        for _ in range(count):
            for a in act:
                self._widget.triggerPageAction(a)

    def move_to_end_of_prev_block(self, count=1):
        if not self.selection_enabled:
            act = [QWebPage.MoveToPreviousLine, QWebPage.MoveToEndOfBlock]
        else:
            act = [QWebPage.SelectPreviousLine, QWebPage.SelectEndOfBlock]
        for _ in range(count):
            for a in act:
                self._widget.triggerPageAction(a)

    def move_to_start_of_document(self):
        if not self.selection_enabled:
            act = QWebPage.MoveToStartOfDocument
        else:
            act = QWebPage.SelectStartOfDocument
        self._widget.triggerPageAction(act)

    def move_to_end_of_document(self):
        if not self.selection_enabled:
            act = QWebPage.MoveToEndOfDocument
        else:
            act = QWebPage.SelectEndOfDocument
        self._widget.triggerPageAction(act)

    def toggle_selection(self):
        self.selection_enabled = not self.selection_enabled
        self.selection_toggled.emit(self.selection_enabled)

    def drop_selection(self):
        self._widget.triggerPageAction(QWebPage.MoveToNextChar)

    def selection(self, callback):
        callback(self._widget.selectedText())

    def _follow_selected(self, *, tab=False):
        if QWebSettings.globalSettings().testAttribute(
                QWebSettings.JavascriptEnabled):
            if tab:
                self._tab.data.override_target = usertypes.ClickTarget.tab
            self._tab.run_js_async("""
                const aElm = document.activeElement;
                if (window.getSelection().anchorNode) {
                    window.getSelection().anchorNode.parentNode.click();
                } else if (aElm && aElm !== document.body) {
                    aElm.click();
                }
            """)
        else:
            selection = self._widget.selectedHtml()
            if not selection:
                # Getting here may mean we crashed, but we can't do anything
                # about that until this commit is released:
                # https://github.com/annulen/webkit/commit/0e75f3272d149bc64899c161f150eb341a2417af
                # TODO find a way to check if something is focused
                self._follow_enter(tab)
                return
            try:
                selected_element = xml.etree.ElementTree.fromstring(
                    '<html>{}</html>'.format(selection)).find('a')
            except xml.etree.ElementTree.ParseError:
                raise browsertab.WebTabError('Could not parse selected '
                                             'element!')

            if selected_element is not None:
                try:
                    url = selected_element.attrib['href']
                except KeyError:
                    raise browsertab.WebTabError('Anchor element without '
                                                 'href!')
                url = self._tab.url().resolved(QUrl(url))
                if tab:
                    self._tab.new_tab_requested.emit(url)
                else:
                    self._tab.load_url(url)

    def follow_selected(self, *, tab=False):
        try:
            self._follow_selected(tab=tab)
        finally:
            self.follow_selected_done.emit()


class WebKitZoom(browsertab.AbstractZoom):

    """QtWebKit implementations related to zooming."""

    def _set_factor_internal(self, factor):
        self._widget.setZoomFactor(factor)


class WebKitScroller(browsertab.AbstractScroller):

    """QtWebKit implementations related to scrolling."""

    # FIXME:qtwebengine When to use the main frame, when the current one?

    def pos_px(self):
        return self._widget.page().mainFrame().scrollPosition()

    def pos_perc(self):
        return self._widget.scroll_pos

    def to_point(self, point):
        self._widget.page().mainFrame().setScrollPosition(point)

    def to_anchor(self, name):
        self._widget.page().mainFrame().scrollToAnchor(name)

    def delta(self, x=0, y=0):
        qtutils.check_overflow(x, 'int')
        qtutils.check_overflow(y, 'int')
        self._widget.page().mainFrame().scroll(x, y)

    def delta_page(self, x=0.0, y=0.0):
        if y.is_integer():
            y = int(y)
            if y == 0:
                pass
            elif y < 0:
                self.page_up(count=-y)
            elif y > 0:
                self.page_down(count=y)
            y = 0
        if x == 0 and y == 0:
            return
        size = self._widget.page().mainFrame().geometry()
        self.delta(x * size.width(), y * size.height())

    def to_perc(self, x=None, y=None):
        if x is None and y == 0:
            self.top()
        elif x is None and y == 100:
            self.bottom()
        else:
            for val, orientation in [(x, Qt.Horizontal), (y, Qt.Vertical)]:
                if val is not None:
                    frame = self._widget.page().mainFrame()
                    maximum = frame.scrollBarMaximum(orientation)
                    if maximum == 0:
                        continue
                    pos = int(maximum * val / 100)
                    pos = qtutils.check_overflow(pos, 'int', fatal=False)
                    frame.setScrollBarValue(orientation, pos)

    def _key_press(self, key, count=1, getter_name=None, direction=None):
        frame = self._widget.page().mainFrame()
        getter = None if getter_name is None else getattr(frame, getter_name)

        # FIXME:qtwebengine needed?
        # self._widget.setFocus()

        for _ in range(min(count, 5000)):
            # Abort scrolling if the minimum/maximum was reached.
            if (getter is not None and
                    frame.scrollBarValue(direction) == getter(direction)):
                return
            self._tab.fake_key_press(key)

    def up(self, count=1):
        self._key_press(Qt.Key_Up, count, 'scrollBarMinimum', Qt.Vertical)

    def down(self, count=1):
        self._key_press(Qt.Key_Down, count, 'scrollBarMaximum', Qt.Vertical)

    def left(self, count=1):
        self._key_press(Qt.Key_Left, count, 'scrollBarMinimum', Qt.Horizontal)

    def right(self, count=1):
        self._key_press(Qt.Key_Right, count, 'scrollBarMaximum', Qt.Horizontal)

    def top(self):
        self._key_press(Qt.Key_Home)

    def bottom(self):
        self._key_press(Qt.Key_End)

    def page_up(self, count=1):
        self._key_press(Qt.Key_PageUp, count, 'scrollBarMinimum', Qt.Vertical)

    def page_down(self, count=1):
        self._key_press(Qt.Key_PageDown, count, 'scrollBarMaximum',
                        Qt.Vertical)

    def at_top(self):
        return self.pos_px().y() == 0

    def at_bottom(self):
        frame = self._widget.page().currentFrame()
        return self.pos_px().y() >= frame.scrollBarMaximum(Qt.Vertical)


class WebKitHistoryPrivate(browsertab.AbstractHistoryPrivate):

    """History-related methods which are not part of the extension API."""

    def serialize(self):
        return qtutils.serialize(self._history)

    def deserialize(self, data):
        qtutils.deserialize(data, self._history)

    def load_items(self, items):
        if items:
            self._tab.before_load_started.emit(items[-1].url)

        stream, _data, user_data = tabhistory.serialize(items)
        qtutils.deserialize_stream(stream, self._history)
        for i, data in enumerate(user_data):
            self._history.itemAt(i).setUserData(data)

        cur_data = self._history.currentItem().userData()
        if cur_data is not None:
            if 'zoom' in cur_data:
                self._tab.zoom.set_factor(cur_data['zoom'])
            if ('scroll-pos' in cur_data and
                    self._tab.scroller.pos_px() == QPoint(0, 0)):
                QTimer.singleShot(0, functools.partial(
                    self._tab.scroller.to_point, cur_data['scroll-pos']))


class WebKitHistory(browsertab.AbstractHistory):

    """QtWebKit implementations related to page history."""

    def __init__(self, tab):
        super().__init__(tab)
        self.private_api = WebKitHistoryPrivate(tab)

    def __len__(self):
        return len(self._history)

    def __iter__(self):
        return iter(self._history.items())

    def current_idx(self):
        return self._history.currentItemIndex()

    def can_go_back(self):
        return self._history.canGoBack()

    def can_go_forward(self):
        return self._history.canGoForward()

    def _item_at(self, i):
        return self._history.itemAt(i)

    def _go_to_item(self, item):
        self._tab.before_load_started.emit(item.url())
        self._history.goToItem(item)


class WebKitElements(browsertab.AbstractElements):

    """QtWebKit implemementations related to elements on the page."""

    def find_css(self, selector, callback, error_cb, *, only_visible=False):
        utils.unused(error_cb)
        mainframe = self._widget.page().mainFrame()
        if mainframe is None:
            raise browsertab.WebTabError("No frame focused!")

        elems = []
        frames = webkitelem.get_child_frames(mainframe)
        for f in frames:
            for elem in f.findAllElements(selector):
                elems.append(webkitelem.WebKitElement(elem, tab=self._tab))

        if only_visible:
            # pylint: disable=protected-access
            elems = [e for e in elems if e._is_visible(mainframe)]
            # pylint: enable=protected-access

        callback(elems)

    def find_id(self, elem_id, callback):
        def find_id_cb(elems):
            """Call the real callback with the found elements."""
            if not elems:
                callback(None)
            else:
                callback(elems[0])

        # Escape non-alphanumeric characters in the selector
        # https://www.w3.org/TR/CSS2/syndata.html#value-def-identifier
        elem_id = re.sub(r'[^a-zA-Z0-9_-]', r'\\\g<0>', elem_id)
        self.find_css('#' + elem_id, find_id_cb, error_cb=lambda exc: None)

    def find_focused(self, callback):
        frame = self._widget.page().currentFrame()
        if frame is None:
            callback(None)
            return

        elem = frame.findFirstElement('*:focus')
        if elem.isNull():
            callback(None)
        else:
            callback(webkitelem.WebKitElement(elem, tab=self._tab))

    def find_at_pos(self, pos, callback):
        assert pos.x() >= 0
        assert pos.y() >= 0
        frame = self._widget.page().frameAt(pos)
        if frame is None:
            # This happens when we click inside the webview, but not actually
            # on the QWebPage - for example when clicking the scrollbar
            # sometimes.
            log.webview.debug("Hit test at {} but frame is None!".format(pos))
            callback(None)
            return

        # You'd think we have to subtract frame.geometry().topLeft() from the
        # position, but it seems QWebFrame::hitTestContent wants a position
        # relative to the QWebView, not to the frame. This makes no sense to
        # me, but it works this way.
        hitresult = frame.hitTestContent(pos)
        if hitresult.isNull():
            # For some reason, the whole hit result can be null sometimes (e.g.
            # on doodle menu links).
            log.webview.debug("Hit test result is null!")
            callback(None)
            return

        try:
            elem = webkitelem.WebKitElement(hitresult.element(), tab=self._tab)
        except webkitelem.IsNullError:
            # For some reason, the hit result element can be a null element
            # sometimes (e.g. when clicking the timetable fields on
            # http://www.sbb.ch/ ).
            log.webview.debug("Hit test result element is null!")
            callback(None)
            return

        callback(elem)


class WebKitAudio(browsertab.AbstractAudio):

    """Dummy handling of audio status for QtWebKit."""

    def set_muted(self, muted: bool, override: bool = False) -> None:
        raise browsertab.WebTabError('Muting is not supported on QtWebKit!')

    def is_muted(self):
        return False

    def is_recently_audible(self):
        return False


class WebKitTabPrivate(browsertab.AbstractTabPrivate):

    """QtWebKit-related methods which aren't part of the public API."""

    def networkaccessmanager(self):
        return self._widget.page().networkAccessManager()

    def user_agent(self):
        page = self._widget.page()
        return page.userAgentForUrl(self._tab.url())

    def clear_ssl_errors(self):
        self.networkaccessmanager().clear_all_ssl_errors()

    def event_target(self):
        return self._widget

    def shutdown(self):
        self._widget.shutdown()


class WebKitTab(browsertab.AbstractTab):

    """A QtWebKit tab in the browser."""

    def __init__(self, *, win_id, mode_manager, private, parent=None):
        super().__init__(win_id=win_id, private=private, parent=parent)
        widget = webview.WebView(win_id=win_id, tab_id=self.tab_id,
                                 private=private, tab=self)
        if private:
            self._make_private(widget)
        self.history = WebKitHistory(tab=self)
        self.scroller = WebKitScroller(tab=self, parent=self)
        self.caret = WebKitCaret(mode_manager=mode_manager,
                                 tab=self, parent=self)
        self.zoom = WebKitZoom(tab=self, parent=self)
        self.search = WebKitSearch(tab=self, parent=self)
        self.printing = WebKitPrinting(tab=self)
        self.elements = WebKitElements(tab=self)
        self.action = WebKitAction(tab=self)
        self.audio = WebKitAudio(tab=self, parent=self)
        self.private_api = WebKitTabPrivate(mode_manager=mode_manager,
                                            tab=self)
        # We're assigning settings in _set_widget
        self.settings = webkitsettings.WebKitSettings(settings=None)
        self._set_widget(widget)
        self._connect_signals()
        self.backend = usertypes.Backend.QtWebKit

    def _install_event_filter(self):
        self._widget.installEventFilter(self._mouse_event_filter)

    def _make_private(self, widget):
        settings = widget.settings()
        settings.setAttribute(QWebSettings.PrivateBrowsingEnabled, True)

    def load_url(self, url, *, emit_before_load_started=True):
        self._load_url_prepare(
            url, emit_before_load_started=emit_before_load_started)
        self._widget.load(url)

    def url(self, *, requested=False):
        frame = self._widget.page().mainFrame()
        if requested:
            return frame.requestedUrl()
        else:
            return frame.url()

    def dump_async(self, callback, *, plain=False):
        frame = self._widget.page().mainFrame()
        if plain:
            callback(frame.toPlainText())
        else:
            callback(frame.toHtml())

    def run_js_async(self, code, callback=None, *, world=None):
        if world is not None and world != usertypes.JsWorld.jseval:
            log.webview.warning("Ignoring world ID {}".format(world))
        document_element = self._widget.page().mainFrame().documentElement()
        result = document_element.evaluateJavaScript(code)
        if callback is not None:
            callback(result)

    def icon(self):
        return self._widget.icon()

    def reload(self, *, force=False):
        if force:
            action = QWebPage.ReloadAndBypassCache
        else:
            action = QWebPage.Reload
        self._widget.triggerPageAction(action)

    def stop(self):
        self._widget.stop()

    def title(self):
        return self._widget.title()

    @pyqtSlot()
    def _on_history_trigger(self):
        url = self.url()
        requested_url = self.url(requested=True)
        self.history_item_triggered.emit(url, requested_url, self.title())

    def set_html(self, html, base_url=QUrl()):
        self._widget.setHtml(html, base_url)

    @pyqtSlot()
    def _on_load_started(self):
        super()._on_load_started()
        nam = self._widget.page().networkAccessManager()
        nam.netrc_used = False
        # Make sure the icon is cleared when navigating to a page without one.
        self.icon_changed.emit(QIcon())

    @pyqtSlot()
    def _on_frame_load_finished(self):
        """Make sure we emit an appropriate status when loading finished.

        While Qt has a bool "ok" attribute for loadFinished, it always is True
        when using error pages...
        See https://github.com/qutebrowser/qutebrowser/issues/84
        """
        self._on_load_finished(not self._widget.page().error_occurred)

    @pyqtSlot()
    def _on_webkit_icon_changed(self):
        """Emit iconChanged with a QIcon like QWebEngineView does."""
        if sip.isdeleted(self._widget):
            log.webview.debug("Got _on_webkit_icon_changed for deleted view!")
            return
        self.icon_changed.emit(self._widget.icon())

    @pyqtSlot(QWebFrame)
    def _on_frame_created(self, frame):
        """Connect the contentsSizeChanged signal of each frame."""
        # FIXME:qtwebengine those could theoretically regress:
        # https://github.com/qutebrowser/qutebrowser/issues/152
        # https://github.com/qutebrowser/qutebrowser/issues/263
        frame.contentsSizeChanged.connect(self._on_contents_size_changed)

    @pyqtSlot(QSize)
    def _on_contents_size_changed(self, size):
        self.contents_size_changed.emit(QSizeF(size))

    @pyqtSlot(usertypes.NavigationRequest)
    def _on_navigation_request(self, navigation):
        super()._on_navigation_request(navigation)

        if not navigation.accepted:
            return

        log.webview.debug("target {} override {}".format(
            self.data.open_target, self.data.override_target))

        if self.data.override_target is not None:
            target = self.data.override_target
            self.data.override_target = None
        else:
            target = self.data.open_target

        if (navigation.navigation_type == navigation.Type.link_clicked and
                target != usertypes.ClickTarget.normal):
            tab = shared.get_tab(self.win_id, target)
            tab.load_url(navigation.url)
            self.data.open_target = usertypes.ClickTarget.normal
            navigation.accepted = False

        if navigation.is_main_frame:
            self.settings.update_for_url(navigation.url)

    @pyqtSlot()
    def _on_ssl_errors(self):
        self._has_ssl_errors = True

    def _connect_signals(self):
        view = self._widget
        page = view.page()
        frame = page.mainFrame()

        page.windowCloseRequested.connect(self.window_close_requested)
        page.linkHovered.connect(self.link_hovered)
        page.loadProgress.connect(self._on_load_progress)
        frame.loadStarted.connect(self._on_load_started)
        view.scroll_pos_changed.connect(self.scroller.perc_changed)
        view.titleChanged.connect(self.title_changed)
        view.urlChanged.connect(self._on_url_changed)
        view.shutting_down.connect(self.shutting_down)
        page.networkAccessManager().sslErrors.connect(self._on_ssl_errors)
        frame.loadFinished.connect(self._on_frame_load_finished)
        view.iconChanged.connect(self._on_webkit_icon_changed)
        page.frameCreated.connect(self._on_frame_created)
        frame.contentsSizeChanged.connect(self._on_contents_size_changed)
        frame.initialLayoutCompleted.connect(self._on_history_trigger)
        page.navigation_request.connect(self._on_navigation_request)
1
22,977
This could probably use `const` as well?
qutebrowser-qutebrowser
py
@@ -40,7 +40,7 @@ import ( // file. The Restore method replaces the contents of the original with the // backup, overwriting any changes that were made since the backup was created. type File struct { - orig, tmp string // file paths + Orig, Tmp string // file paths } // New creates a backup copy of the specified file, located in the same
1
/*
 * Copyright 2018 The Kythe Authors. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package backup is a simple library for backing up a config file and restoring
// it using a temporary file.
//
// Example usage:
//
//	tmp, err := backup.Save(someFile)
//	if err != nil {
//		return fmt.Errorf("backing up %q: %v", somePath, err)
//	}
//	defer tmp.Release()
//	// ... do real work ...
//	tmp.Restore()
package backup // import "kythe.io/kythe/go/extractors/config/runextractor/backup"

import (
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// A File records the locations of an original file and a backup copy of that
// file. The Restore method replaces the contents of the original with the
// backup, overwriting any changes that were made since the backup was created.
type File struct {
	orig, tmp string // file paths
}

// New creates a backup copy of the specified file, located in the same
// directory. The caller should ensure the Release method is called when the
// backup is no longer needed, to clean up.
func New(orig string) (*File, error) {
	f, err := os.Open(orig)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	dir := filepath.Dir(orig)
	base := filepath.Base(orig)
	tf, err := ioutil.TempFile(dir, base+".bkup")
	if err != nil {
		return nil, err
	}
	_, err = io.Copy(tf, f)
	cerr := tf.Close()
	if err != nil {
		return nil, err
	} else if cerr != nil {
		return nil, cerr
	}
	return &File{orig, tf.Name()}, nil
}

// Restore puts the original version of the backed up file back in place.
func (f *File) Restore() error {
	if err := os.Rename(f.tmp, f.orig); err != nil {
		return err
	}
	f.tmp = ""
	return nil
}

// Release removes the temporary file copy if it hasn't already been moved.
func (f *File) Release() {
	if f.tmp != "" {
		if err := os.Remove(f.tmp); err != nil {
			log.Printf("Warning: removing backup of %q failed: %v", f.orig, err)
		}
	}
}
1
12,031
It doesn't look like these need to be exported?
kythe-kythe
go
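A note on the review comment above: in Go, a struct's fields can stay unexported while the type is still fully usable from other packages through its exported constructor and methods, which is why the reviewer questions exporting `Orig` and `Tmp`. Below is a minimal sketch of that shape; the package name `backuplike` and the simplified `New` are hypothetical illustrations, not Kythe's actual code.

// Package backuplike sketches the reviewer's suggestion: the file paths stay
// unexported, and callers in other packages only go through exported methods.
package backuplike

import (
	"io"
	"os"
	"path/filepath"
)

// File records an original path and its backup copy. The fields are
// deliberately unexported; no caller needs to read them directly.
type File struct {
	orig, tmp string // file paths
}

// New copies orig to a temp file in the same directory and returns a handle.
func New(orig string) (*File, error) {
	src, err := os.Open(orig)
	if err != nil {
		return nil, err
	}
	defer src.Close()

	tmp, err := os.CreateTemp(filepath.Dir(orig), filepath.Base(orig)+".bkup")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(tmp, src); err != nil {
		tmp.Close()
		return nil, err
	}
	if err := tmp.Close(); err != nil {
		return nil, err
	}
	return &File{orig: orig, tmp: tmp.Name()}, nil
}

// Restore moves the backup back over the original; callers never touch the
// fields, so nothing here requires them to be exported.
func (f *File) Restore() error {
	if err := os.Rename(f.tmp, f.orig); err != nil {
		return err
	}
	f.tmp = ""
	return nil
}

A caller in another package would write `f, err := backuplike.New("config.yaml")` followed by `defer f.Restore()`, with no exported-field access anywhere.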
@@ -5,6 +5,7 @@ package fs import ( "bytes" "errors" + "fmt" "io/ioutil" "os" "path/filepath"
1
// +build linux

package fs

import (
	"bytes"
	"errors"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/moby/sys/mountinfo"
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
	"github.com/opencontainers/runc/libcontainer/configs"
	libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
)

type CpusetGroup struct {
}

func (s *CpusetGroup) Name() string {
	return "cpuset"
}

func (s *CpusetGroup) Apply(d *cgroupData) error {
	dir, err := d.path("cpuset")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return s.ApplyDir(dir, d.config, d.pid)
}

func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
	if cgroup.Resources.CpusetCpus != "" {
		if err := fscommon.WriteFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
			return err
		}
	}
	if cgroup.Resources.CpusetMems != "" {
		if err := fscommon.WriteFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
			return err
		}
	}
	return nil
}

func (s *CpusetGroup) Remove(d *cgroupData) error {
	return removePath(d.path("cpuset"))
}

func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}

// Get the source mount point of directory passed in as argument.
func getMount(dir string) (string, error) {
	mi, err := mountinfo.GetMounts(mountinfo.ParentsFilter(dir))
	if err != nil {
		return "", err
	}
	if len(mi) < 1 {
		return "", fmt.Errorf("Can't find mount point of %s", dir)
	}

	// find the longest mount point
	var idx, maxlen int
	for i := range mi {
		if len(mi[i].Mountpoint) > maxlen {
			maxlen = len(mi[i].Mountpoint)
			idx = i
		}
	}

	return mi[idx].Mountpoint, nil
}

func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
	// This might happen if we have no cpuset cgroup mounted.
	// Just do nothing and don't fail.
	if dir == "" {
		return nil
	}
	root, err := getMount(dir)
	if err != nil {
		return err
	}
	root = filepath.Dir(root)
	// 'ensureParent' start with parent because we don't want to
	// explicitly inherit from parent, it could conflict with
	// 'cpuset.cpu_exclusive'.
	if err := s.ensureParent(filepath.Dir(dir), root); err != nil {
		return err
	}
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	// We didn't inherit cpuset configs from parent, but we have
	// to ensure cpuset configs are set before moving task into the
	// cgroup.
	// The logic is, if user specified cpuset configs, use these
	// specified configs, otherwise, inherit from parent. This makes
	// cpuset configs work correctly with 'cpuset.cpu_exclusive', and
	// keep backward compatibility.
	if err := s.ensureCpusAndMems(dir, cgroup); err != nil {
		return err
	}

	// because we are not using d.join we need to place the pid into the procs file
	// unlike the other subsystems
	return cgroups.WriteCgroupProc(dir, pid)
}

func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
	if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
		return
	}
	if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
		return
	}
	return cpus, mems, nil
}

// ensureParent makes sure that the parent directory of current is created
// and populated with the proper cpus and mems files copied from
// it's parent.
func (s *CpusetGroup) ensureParent(current, root string) error {
	parent := filepath.Dir(current)
	if libcontainerUtils.CleanPath(parent) == root {
		return nil
	}
	// Avoid infinite recursion.
	if parent == current {
		return errors.New("cpuset: cgroup parent path outside cgroup root")
	}
	if err := s.ensureParent(parent, root); err != nil {
		return err
	}
	if err := os.MkdirAll(current, 0755); err != nil {
		return err
	}
	return s.copyIfNeeded(current, parent)
}

// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
// directory to the current directory if the file's contents are 0
func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
	var (
		err                      error
		currentCpus, currentMems []byte
		parentCpus, parentMems   []byte
	)

	if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
		return err
	}
	if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
		return err
	}

	if s.isEmpty(currentCpus) {
		if err := fscommon.WriteFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
			return err
		}
	}
	if s.isEmpty(currentMems) {
		if err := fscommon.WriteFile(current, "cpuset.mems", string(parentMems)); err != nil {
			return err
		}
	}
	return nil
}

func (s *CpusetGroup) isEmpty(b []byte) bool {
	return len(bytes.Trim(b, "\n")) == 0
}

func (s *CpusetGroup) ensureCpusAndMems(path string, cgroup *configs.Cgroup) error {
	if err := s.Set(path, cgroup); err != nil {
		return err
	}
	return s.copyIfNeeded(path, filepath.Dir(path))
}
1
20,110
I think you should remove "fmt" here and change `fmt.Errorf` to `errors.Errorf`.
opencontainers-runc
go
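To make the reviewer's suggestion above concrete: the standard library's `errors` package has no `Errorf`, so the comment presumably refers to `github.com/pkg/errors`, which provides one. Below is a sketch of `getMount` written that way, assuming the `github.com/pkg/errors` dependency; it illustrates the suggestion, not the change that actually landed in runc.

package fs

import (
	"github.com/moby/sys/mountinfo"
	"github.com/pkg/errors" // assumed dependency; provides errors.Errorf
)

// getMount returns the deepest mount point containing dir, using
// errors.Errorf for the not-found case instead of pulling in "fmt".
func getMount(dir string) (string, error) {
	mi, err := mountinfo.GetMounts(mountinfo.ParentsFilter(dir))
	if err != nil {
		return "", err
	}
	if len(mi) < 1 {
		return "", errors.Errorf("can't find mount point of %s", dir)
	}
	// Pick the longest (deepest) mount point among the parents of dir.
	var idx, maxlen int
	for i := range mi {
		if len(mi[i].Mountpoint) > maxlen {
			maxlen = len(mi[i].Mountpoint)
			idx = i
		}
	}
	return mi[idx].Mountpoint, nil
}

The practical difference is only the import: `errors.Errorf` from pkg/errors also records a stack trace, whereas `fmt.Errorf` does not.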
@@ -0,0 +1,6 @@ +class AddVideoAttributesToProducts < ActiveRecord::Migration + def change + add_column :products, :length_in_days, :integer + add_column :products, :resources, :text, default: "", null: false + end +end
1
1
11,548
Here's the `resources` attribute, @jferris. I'm not against renaming `Product` to `Resource` and then renaming this one to something else.
thoughtbot-upcase
rb
@@ -11,7 +11,7 @@ import ( // one of the workers, it is returned. FinalFunc is always run, regardless of // any other previous errors. func RunWorkers(ctx context.Context, count int, workerFunc func() error, finalFunc func()) error { - wg, ctx := errgroup.WithContext(ctx) + wg, _ := errgroup.WithContext(ctx) // run workers for i := 0; i < count; i++ {
1
package repository

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// RunWorkers runs count instances of workerFunc using an errgroup.Group.
// After all workers have terminated, finalFunc is run. If an error occurs in
// one of the workers, it is returned. FinalFunc is always run, regardless of
// any other previous errors.
func RunWorkers(ctx context.Context, count int, workerFunc func() error, finalFunc func()) error {
	wg, ctx := errgroup.WithContext(ctx)

	// run workers
	for i := 0; i < count; i++ {
		wg.Go(workerFunc)
	}

	// wait for termination
	err := wg.Wait()

	// make sure finalFunc is run
	finalFunc()

	// return error from workers to the caller
	return err
}
1
12,183
If the context is unused, this is equivalent to `var wg errgroup.Group`.
restic-restic
go
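The comment above points at a real simplification: the errgroup documentation states that the zero value of `errgroup.Group` is valid, so when the context returned by `errgroup.WithContext` is never used, the group can simply be declared with `var`. A minimal sketch of `RunWorkers` rewritten in that style (a hypothetical illustration, not the actual restic patch):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// runWorkers mirrors RunWorkers above, but uses the zero value of
// errgroup.Group, which is what errgroup.WithContext reduces to when the
// returned context is never consumed.
func runWorkers(count int, workerFunc func() error, finalFunc func()) error {
	var wg errgroup.Group
	for i := 0; i < count; i++ {
		wg.Go(workerFunc)
	}
	err := wg.Wait()
	finalFunc() // always runs, regardless of worker errors
	return err
}

func main() {
	err := runWorkers(3,
		func() error { return nil },
		func() { fmt.Println("cleanup ran") })
	fmt.Println("workers done, err =", err)
}

Note that dropping the context also removes cancellation-on-first-error semantics, which is fine here because workerFunc never observed the derived context in the first place.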
@@ -47,6 +47,8 @@ public interface HighlightParams { // sizing public static final String FRAGSIZE = HIGHLIGHT+".fragsize"; // OH, FVH, UH + public static final String FRAGALIGNRATIO = HIGHLIGHT+".fragAlignRatio"; // UH + public static final String FRAGSIZEISMINIMUM = HIGHLIGHT+".fragsizeIsMinimum"; // UH public static final String FRAGMENTER = HIGHLIGHT+".fragmenter"; // OH public static final String INCREMENT = HIGHLIGHT+".increment"; // OH public static final String REGEX = "regex"; // OH
1
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.common.params;

/**
 *
 * @since solr 1.3
 */
public interface HighlightParams {
  // primary
  public static final String HIGHLIGHT = "hl";
  public static final String METHOD = HIGHLIGHT+".method"; // original|fastVector|postings|unified
  public static final String FIELDS = HIGHLIGHT+".fl";
  public static final String SNIPPETS = HIGHLIGHT+".snippets";

  // KEY:
  // OH = (original) Highlighter (AKA the standard Highlighter)
  // FVH = FastVectorHighlighter
  // UH = UnifiedHighlighter (evolved from PostingsHighlighter)

  // query interpretation
  public static final String Q = HIGHLIGHT+".q"; // all
  public static final String QPARSER = HIGHLIGHT+".qparser"; // all
  public static final String FIELD_MATCH = HIGHLIGHT+".requireFieldMatch"; // OH, FVH, UH
  public static final String USE_PHRASE_HIGHLIGHTER = HIGHLIGHT+".usePhraseHighlighter"; // OH, FVH, UH
  public static final String HIGHLIGHT_MULTI_TERM = HIGHLIGHT+".highlightMultiTerm"; // all

  // if no snippets...
  public static final String DEFAULT_SUMMARY = HIGHLIGHT + ".defaultSummary"; // UH
  public static final String ALTERNATE_FIELD = HIGHLIGHT+".alternateField"; // OH, FVH
  public static final String ALTERNATE_FIELD_LENGTH = HIGHLIGHT+".maxAlternateFieldLength"; // OH, FVH
  public static final String HIGHLIGHT_ALTERNATE = HIGHLIGHT+".highlightAlternate"; // OH, FVH

  // sizing
  public static final String FRAGSIZE = HIGHLIGHT+".fragsize"; // OH, FVH, UH
  public static final String FRAGMENTER = HIGHLIGHT+".fragmenter"; // OH
  public static final String INCREMENT = HIGHLIGHT+".increment"; // OH
  public static final String REGEX = "regex"; // OH
  public static final String SLOP = HIGHLIGHT+"."+REGEX+".slop"; // OH
  public static final String PATTERN = HIGHLIGHT+"."+REGEX+".pattern"; // OH
  public static final String MAX_RE_CHARS = HIGHLIGHT+"."+REGEX+".maxAnalyzedChars"; // OH
  public static final String BOUNDARY_SCANNER = HIGHLIGHT+".boundaryScanner"; // FVH
  public static final String BS_MAX_SCAN = HIGHLIGHT+".bs.maxScan"; // FVH
  public static final String BS_CHARS = HIGHLIGHT+".bs.chars"; // FVH
  public static final String BS_TYPE = HIGHLIGHT+".bs.type"; // FVH, UH
  public static final String BS_LANGUAGE = HIGHLIGHT+".bs.language"; // FVH, UH
  public static final String BS_COUNTRY = HIGHLIGHT+".bs.country"; // FVH, UH
  public static final String BS_VARIANT = HIGHLIGHT+".bs.variant"; // FVH, UH
  public static final String BS_SEP = HIGHLIGHT+".bs.separator"; // UH

  // formatting
  public static final String FORMATTER = HIGHLIGHT+".formatter"; // OH
  public static final String ENCODER = HIGHLIGHT+".encoder"; // all
  public static final String MERGE_CONTIGUOUS_FRAGMENTS = HIGHLIGHT + ".mergeContiguous"; // OH
  public static final String SIMPLE = "simple"; // OH
  public static final String SIMPLE_PRE = HIGHLIGHT+"."+SIMPLE+".pre"; // OH
  public static final String SIMPLE_POST = HIGHLIGHT+"."+SIMPLE+".post"; // OH
  public static final String FRAGMENTS_BUILDER = HIGHLIGHT+".fragmentsBuilder"; // FVH
  public static final String TAG_PRE = HIGHLIGHT + ".tag.pre"; // FVH, UH
  public static final String TAG_POST = HIGHLIGHT + ".tag.post"; // FVH, UH
  public static final String TAG_ELLIPSIS = HIGHLIGHT + ".tag.ellipsis"; // FVH, UH
  public static final String MULTI_VALUED_SEPARATOR = HIGHLIGHT + ".multiValuedSeparatorChar"; // FVH

  // ordering
  public static final String PRESERVE_MULTI = HIGHLIGHT+".preserveMulti"; // OH
  public static final String FRAG_LIST_BUILDER = HIGHLIGHT+".fragListBuilder"; // FVH
  public static final String SCORE = "score"; // UH
  public static final String SCORE_K1 = HIGHLIGHT +"."+SCORE+".k1"; // UH
  public static final String SCORE_B = HIGHLIGHT +"."+SCORE+".b"; // UH
  public static final String SCORE_PIVOT = HIGHLIGHT +"."+SCORE+".pivot"; // UH

  // misc
  public static final String MAX_CHARS = HIGHLIGHT+".maxAnalyzedChars"; // all
  public static final String PAYLOADS = HIGHLIGHT+".payloads"; // OH
  public static final String MAX_MULTIVALUED_TO_EXAMINE = HIGHLIGHT + ".maxMultiValuedToExamine"; // OH
  public static final String MAX_MULTIVALUED_TO_MATCH = HIGHLIGHT + ".maxMultiValuedToMatch"; // OH
  public static final String PHRASE_LIMIT = HIGHLIGHT + ".phraseLimit"; // FVH
  public static final String OFFSET_SOURCE = HIGHLIGHT + ".offsetSource"; // UH
  public static final String CACHE_FIELD_VAL_CHARS_THRESHOLD = HIGHLIGHT + ".cacheFieldValCharsThreshold"; // UH
  public static final String WEIGHT_MATCHES = HIGHLIGHT + ".weightMatches"; // UH
}
1
31,780
Very minor: I'd prefer these two added rows be switched so that `fragsizeIsMinimum` directly follows `fragsize`.
apache-lucene-solr
java
@@ -98,6 +98,10 @@ def is_local_interface(host): if ':' in host: host = host.split(':',1)[0] + # If "localhost" or a loopback IP has been specified it is a deliberate reference + if host == 'localhost' or host.startswith('127.'): + return False + try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.connect( (host, 4242) )
1
import os, time, fnmatch, socket, errno from django.conf import settings from os.path import isdir, isfile, join, exists, splitext, basename, realpath import whisper from graphite.logger import log from graphite.remote_storage import RemoteStore from graphite.util import unpickle try: import rrdtool except ImportError: rrdtool = False try: import gzip except ImportError: gzip = False try: import cPickle as pickle except ImportError: import pickle DATASOURCE_DELIMETER = '::RRD_DATASOURCE::' class Store: def __init__(self, directories=[], remote_hosts=[]): self.directories = directories self.remote_hosts = remote_hosts self.remote_stores = [ RemoteStore(host) for host in remote_hosts if not is_local_interface(host) ] if not (directories or remote_hosts): raise ValueError("directories and remote_hosts cannot both be empty") def get(self, metric_path): #Deprecated for directory in self.directories: relative_fs_path = metric_path.replace('.', os.sep) + '.wsp' absolute_fs_path = join(directory, relative_fs_path) if exists(absolute_fs_path): return WhisperFile(absolute_fs_path, metric_path) def find(self, query): if is_pattern(query): for match in self.find_all(query): yield match else: match = self.find_first(query) if match is not None: yield match def find_first(self, query): # Search locally first for directory in self.directories: for match in find(directory, query): return match # If nothing found earch remotely remote_requests = [ r.find(query) for r in self.remote_stores if r.available ] for request in remote_requests: for match in request.get_results(): return match def find_all(self, query): # Start remote searches found = set() remote_requests = [ r.find(query) for r in self.remote_stores if r.available ] # Search locally for directory in self.directories: for match in find(directory, query): if match.metric_path not in found: yield match found.add(match.metric_path) # Gather remote search results for request in remote_requests: for match in request.get_results(): if match.metric_path not in found: yield match found.add(match.metric_path) def is_local_interface(host): if ':' in host: host = host.split(':',1)[0] try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.connect( (host, 4242) ) local_ip = sock.getsockname()[0] sock.close() except: log.exception("Failed to open socket with %s" % host) raise if local_ip == host: return True return False def is_pattern(s): return '*' in s or '?' 
in s or '[' in s or '{' in s def is_escaped_pattern(s): for symbol in '*?[{': i = s.find(symbol) if i > 0: if s[i-1] == '\\': return True return False def find_escaped_pattern_fields(pattern_string): pattern_parts = pattern_string.split('.') for index,part in enumerate(pattern_parts): if is_escaped_pattern(part): yield index def find(root_dir, pattern): "Generates nodes beneath root_dir matching the given pattern" clean_pattern = pattern.replace('\\', '') pattern_parts = clean_pattern.split('.') for absolute_path in _find(root_dir, pattern_parts): if DATASOURCE_DELIMETER in basename(absolute_path): (absolute_path,datasource_pattern) = absolute_path.rsplit(DATASOURCE_DELIMETER,1) else: datasource_pattern = None relative_path = absolute_path[ len(root_dir): ].lstrip('/') metric_path = relative_path.replace('/','.') # Preserve pattern in resulting path for escaped query pattern elements metric_path_parts = metric_path.split('.') for field_index in find_escaped_pattern_fields(pattern): metric_path_parts[field_index] = pattern_parts[field_index].replace('\\', '') metric_path = '.'.join(metric_path_parts) if isdir(absolute_path): yield Branch(absolute_path, metric_path) elif isfile(absolute_path): (metric_path,extension) = splitext(metric_path) if extension == '.wsp': yield WhisperFile(absolute_path, metric_path) elif extension == '.gz' and metric_path.endswith('.wsp'): metric_path = splitext(metric_path)[0] yield GzippedWhisperFile(absolute_path, metric_path) elif rrdtool and extension == '.rrd': rrd = RRDFile(absolute_path, metric_path) if datasource_pattern is None: yield rrd else: for source in rrd.getDataSources(): if fnmatch.fnmatch(source.name, datasource_pattern): yield source def _find(current_dir, patterns): """Recursively generates absolute paths whose components underneath current_dir match the corresponding pattern in patterns""" pattern = patterns[0] patterns = patterns[1:] try: entries = os.listdir(current_dir) except OSError as e: log.exception(e) entries = [] subdirs = [e for e in entries if isdir( join(current_dir,e) )] matching_subdirs = match_entries(subdirs, pattern) if len(patterns) == 1 and rrdtool: #the last pattern may apply to RRD data sources files = [e for e in entries if isfile( join(current_dir,e) )] rrd_files = match_entries(files, pattern + ".rrd") if rrd_files: #let's assume it does datasource_pattern = patterns[0] for rrd_file in rrd_files: absolute_path = join(current_dir, rrd_file) yield absolute_path + DATASOURCE_DELIMETER + datasource_pattern if patterns: #we've still got more directories to traverse for subdir in matching_subdirs: absolute_path = join(current_dir, subdir) for match in _find(absolute_path, patterns): yield match else: #we've got the last pattern files = [e for e in entries if isfile( join(current_dir,e) )] matching_files = match_entries(files, pattern + '.*') for basename in matching_files + matching_subdirs: yield join(current_dir, basename) def _deduplicate(entries): yielded = set() for entry in entries: if entry not in yielded: yielded.add(entry) yield entry def match_entries(entries, pattern): # First we check for pattern variants (ie. 
{foo,bar}baz = foobaz or barbaz) v1, v2 = pattern.find('{'), pattern.find('}') if v1 > -1 and v2 > v1: variations = pattern[v1+1:v2].split(',') variants = [ pattern[:v1] + v + pattern[v2+1:] for v in variations ] matching = [] for variant in variants: matching.extend( fnmatch.filter(entries, variant) ) return list( _deduplicate(matching) ) #remove dupes without changing order else: matching = fnmatch.filter(entries, pattern) matching.sort() return matching # Node classes class Node: context = {} def __init__(self, fs_path, metric_path): self.fs_path = str(fs_path) self.metric_path = str(metric_path) self.real_metric = str(metric_path) self.name = self.metric_path.split('.')[-1] def isLocal(self): return True def getIntervals(self): return [] def updateContext(self, newContext): raise NotImplementedError() class Branch(Node): "Node with children" def fetch(self, startTime, endTime, now=None): "No-op to make all Node's fetch-able" return [] def isLeaf(self): return False class Leaf(Node): "(Abstract) Node that stores data" def isLeaf(self): return True # Database File classes class WhisperFile(Leaf): cached_context_data = None extension = '.wsp' def __init__(self, *args, **kwargs): Leaf.__init__(self, *args, **kwargs) real_fs_path = realpath(self.fs_path) if real_fs_path != self.fs_path: relative_fs_path = self.metric_path.replace('.', '/') + self.extension base_fs_path = realpath(self.fs_path[ :-len(relative_fs_path) ]) relative_real_fs_path = real_fs_path[ len(base_fs_path)+1: ] self.real_metric = relative_real_fs_path[ :-len(self.extension) ].replace('/', '.') def getIntervals(self): start = time.time() - whisper.info(self.fs_path)['maxRetention'] end = max( os.stat(self.fs_path).st_mtime, start ) return [ (start, end) ] def fetch(self, startTime, endTime, now=None): return whisper.fetch(self.fs_path, startTime, endTime, now) @property def context(self): if self.cached_context_data is not None: return self.cached_context_data context_path = self.fs_path[ :-len(self.extension) ] + '.context.pickle' if exists(context_path): fh = open(context_path, 'rb') context_data = unpickle.load(fh) fh.close() else: context_data = {} self.cached_context_data = context_data return context_data def updateContext(self, newContext): self.context.update(newContext) context_path = self.fs_path[ :-len(self.extension) ] + '.context.pickle' fh = open(context_path, 'wb') pickle.dump(self.context, fh) fh.close() class GzippedWhisperFile(WhisperFile): extension = '.wsp.gz' def fetch(self, startTime, endTime, now=None): if not gzip: raise Exception("gzip module not available, GzippedWhisperFile not supported") fh = gzip.GzipFile(self.fs_path, 'rb') try: return whisper.file_fetch(fh, startTime, endTime, now) finally: fh.close() def getIntervals(self): if not gzip: return [] fh = gzip.GzipFile(self.fs_path, 'rb') try: start = time.time() - whisper.__readHeader(fh)['maxRetention'] end = max( os.stat(self.fs_path).st_mtime, start ) finally: fh.close() return [ (start, end) ] class RRDFile(Branch): def getDataSources(self): info = rrdtool.info(self.fs_path) if 'ds' in info: return [RRDDataSource(self, datasource_name) for datasource_name in info['ds']] else: ds_keys = [ key for key in info if key.startswith('ds[') ] datasources = set( key[3:].split(']')[0] for key in ds_keys ) return [ RRDDataSource(self, ds) for ds in datasources ] def getRetention(self): info = rrdtool.info(self.fs_path) if 'rra' in info: rras = info['rra'] else: # Ugh, I like the old python-rrdtool api better.. 
rra_count = max([ int(key[4]) for key in info if key.startswith('rra[') ]) + 1 rras = [{}] * rra_count for i in range(rra_count): rras[i]['pdp_per_row'] = info['rra[%d].pdp_per_row' % i] rras[i]['rows'] = info['rra[%d].rows' % i] retention_points = 0 for rra in rras: points = rra['pdp_per_row'] * rra['rows'] if points > retention_points: retention_points = points return retention_points * info['step'] class RRDDataSource(Leaf): def __init__(self, rrd_file, name): Leaf.__init__(self, rrd_file.fs_path, rrd_file.metric_path + '.' + name) self.rrd_file = rrd_file def getIntervals(self): start = time.time() - self.rrd_file.getRetention() end = max( os.stat(self.rrd_file.fs_path).st_mtime, start ) return [ (start, end) ] def fetch(self, startTime, endTime, now=None): # 'now' parameter is meaningful for whisper but not RRD startString = time.strftime("%H:%M_%Y%m%d+%Ss", time.localtime(startTime)) endString = time.strftime("%H:%M_%Y%m%d+%Ss", time.localtime(endTime)) if settings.FLUSHRRDCACHED: rrdtool.flushcached(self.fs_path, '--daemon', settings.FLUSHRRDCACHED) (timeInfo,columns,rows) = rrdtool.fetch(self.fs_path,'AVERAGE','-s' + startString,'-e' + endString) colIndex = list(columns).index(self.name) rows.pop() #chop off the latest value because RRD returns crazy last values sometimes values = (row[colIndex] for row in rows) return (timeInfo,values) # Exposed Storage API LOCAL_STORE = Store(settings.DATA_DIRS) STORE = Store(settings.DATA_DIRS, remote_hosts=settings.CLUSTER_SERVERS)
1
9,565
Given that line 115 returns `True`, why would this be `False` here?
graphite-project-graphite-web
py
@@ -367,6 +367,17 @@ public class OAuth2LoginConfigurerTests { assertThat(this.response.getRedirectedUrl()).matches("http://localhost/oauth2/authorization/google"); } + // gh-6802 + @Test + public void oauth2LoginWithOneClientConfiguredAndFormLoginThenRedirectDefaultLoginPage() throws Exception { + loadConfig(OAuth2LoginConfigFormLogin.class); + String requestUri = "/"; + this.request = new MockHttpServletRequest("GET", requestUri); + this.request.setServletPath(requestUri); + this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); + assertThat(this.response.getRedirectedUrl()).matches("http://localhost/login"); + } + // gh-5347 @Test public void oauth2LoginWithOneClientConfiguredAndRequestFaviconNotAuthenticatedThenRedirectDefaultLoginPage()
1
/* * Copyright 2002-2021 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.config.annotation.web.configurers.oauth2.client; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.http.HttpHeaders; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.springframework.beans.factory.BeanCreationException; import org.springframework.beans.factory.NoUniqueBeanDefinitionException; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.ApplicationListener; import org.springframework.context.ConfigurableApplicationContext; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; import org.springframework.mock.web.MockFilterChain; import org.springframework.mock.web.MockHttpServletRequest; import org.springframework.mock.web.MockHttpServletResponse; import org.springframework.security.authentication.event.AuthenticationSuccessEvent; import org.springframework.security.config.annotation.web.builders.HttpSecurity; import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity; import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter; import org.springframework.security.config.oauth2.client.CommonOAuth2Provider; import org.springframework.security.config.test.SpringTestRule; import org.springframework.security.core.Authentication; import org.springframework.security.core.GrantedAuthority; import org.springframework.security.core.authority.AuthorityUtils; import org.springframework.security.core.authority.SimpleGrantedAuthority; import org.springframework.security.core.authority.mapping.GrantedAuthoritiesMapper; import org.springframework.security.oauth2.client.authentication.OAuth2AuthenticationToken; import org.springframework.security.oauth2.client.endpoint.OAuth2AccessTokenResponseClient; import org.springframework.security.oauth2.client.endpoint.OAuth2AuthorizationCodeGrantRequest; import org.springframework.security.oauth2.client.oidc.userinfo.OidcUserRequest; import org.springframework.security.oauth2.client.oidc.web.logout.OidcClientInitiatedLogoutSuccessHandler; import org.springframework.security.oauth2.client.registration.ClientRegistration; import org.springframework.security.oauth2.client.registration.ClientRegistrationRepository; import org.springframework.security.oauth2.client.registration.InMemoryClientRegistrationRepository; import org.springframework.security.oauth2.client.registration.TestClientRegistrations; import org.springframework.security.oauth2.client.userinfo.OAuth2UserRequest; import org.springframework.security.oauth2.client.userinfo.OAuth2UserService; import 
org.springframework.security.oauth2.client.web.AuthorizationRequestRepository; import org.springframework.security.oauth2.client.web.HttpSessionOAuth2AuthorizationRequestRepository; import org.springframework.security.oauth2.client.web.OAuth2AuthorizationRequestResolver; import org.springframework.security.oauth2.core.OAuth2AccessToken; import org.springframework.security.oauth2.core.endpoint.OAuth2AccessTokenResponse; import org.springframework.security.oauth2.core.endpoint.OAuth2AuthorizationRequest; import org.springframework.security.oauth2.core.endpoint.OAuth2ParameterNames; import org.springframework.security.oauth2.core.oidc.IdTokenClaimNames; import org.springframework.security.oauth2.core.oidc.OidcIdToken; import org.springframework.security.oauth2.core.oidc.TestOidcIdTokens; import org.springframework.security.oauth2.core.oidc.endpoint.OidcParameterNames; import org.springframework.security.oauth2.core.oidc.user.DefaultOidcUser; import org.springframework.security.oauth2.core.oidc.user.OidcUser; import org.springframework.security.oauth2.core.oidc.user.OidcUserAuthority; import org.springframework.security.oauth2.core.oidc.user.TestOidcUsers; import org.springframework.security.oauth2.core.user.DefaultOAuth2User; import org.springframework.security.oauth2.core.user.OAuth2User; import org.springframework.security.oauth2.core.user.OAuth2UserAuthority; import org.springframework.security.oauth2.jwt.Jwt; import org.springframework.security.oauth2.jwt.JwtDecoder; import org.springframework.security.oauth2.jwt.JwtDecoderFactory; import org.springframework.security.oauth2.jwt.TestJwts; import org.springframework.security.web.FilterChainProxy; import org.springframework.security.web.authentication.HttpStatusEntryPoint; import org.springframework.security.web.context.HttpRequestResponseHolder; import org.springframework.security.web.context.HttpSessionSecurityContextRepository; import org.springframework.security.web.context.SecurityContextRepository; import org.springframework.security.web.util.matcher.RequestHeaderRequestMatcher; import org.springframework.test.web.servlet.MockMvc; import org.springframework.web.context.support.AnnotationConfigWebApplicationContext; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.mockito.ArgumentMatchers.any; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors.authentication; import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors.csrf; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.redirectedUrl; /** * Tests for {@link OAuth2LoginConfigurer}. 
* * @author Kazuki Shimizu * @author Joe Grandja * @since 5.0.1 */ public class OAuth2LoginConfigurerTests { // @formatter:off private static final ClientRegistration GOOGLE_CLIENT_REGISTRATION = CommonOAuth2Provider.GOOGLE .getBuilder("google") .clientId("clientId") .clientSecret("clientSecret") .build(); // @formatter:on // @formatter:off private static final ClientRegistration GITHUB_CLIENT_REGISTRATION = CommonOAuth2Provider.GITHUB .getBuilder("github") .clientId("clientId") .clientSecret("clientSecret") .build(); // @formatter:on // @formatter:off private static final ClientRegistration CLIENT_CREDENTIALS_REGISTRATION = TestClientRegistrations.clientCredentials() .build(); // @formatter:on private ConfigurableApplicationContext context; @Autowired private FilterChainProxy springSecurityFilterChain; @Autowired private AuthorizationRequestRepository<OAuth2AuthorizationRequest> authorizationRequestRepository; @Autowired SecurityContextRepository securityContextRepository; @Rule public final SpringTestRule spring = new SpringTestRule(); @Autowired(required = false) MockMvc mvc; private MockHttpServletRequest request; private MockHttpServletResponse response; private MockFilterChain filterChain; @Before public void setup() { this.request = new MockHttpServletRequest("GET", ""); this.request.setServletPath("/login/oauth2/code/google"); this.response = new MockHttpServletResponse(); this.filterChain = new MockFilterChain(); } @After public void cleanup() { if (this.context != null) { this.context.close(); } } @Test public void oauth2Login() throws Exception { // setup application context loadConfig(OAuth2LoginConfig.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest(); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(1); assertThat(authentication.getAuthorities()).first().isInstanceOf(OAuth2UserAuthority.class) .hasToString("ROLE_USER"); } @Test public void requestWhenOauth2LoginInLambdaThenAuthenticationContainsOauth2UserAuthority() throws Exception { loadConfig(OAuth2LoginInLambdaConfig.class); OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest(); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(1); assertThat(authentication.getAuthorities()).first().isInstanceOf(OAuth2UserAuthority.class) .hasToString("ROLE_USER"); } // gh-6009 @Test public void oauth2LoginWhenSuccessThenAuthenticationSuccessEventPublished() throws Exception { // setup application context 
loadConfig(OAuth2LoginConfig.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest(); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions assertThat(OAuth2LoginConfig.EVENTS).isNotEmpty(); assertThat(OAuth2LoginConfig.EVENTS).hasSize(1); assertThat(OAuth2LoginConfig.EVENTS.get(0)).isInstanceOf(AuthenticationSuccessEvent.class); } @Test public void oauth2LoginCustomWithConfigurer() throws Exception { // setup application context loadConfig(OAuth2LoginConfigCustomWithConfigurer.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest(); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(2); assertThat(authentication.getAuthorities()).first().hasToString("ROLE_USER"); assertThat(authentication.getAuthorities()).last().hasToString("ROLE_OAUTH2_USER"); } @Test public void oauth2LoginCustomWithBeanRegistration() throws Exception { // setup application context loadConfig(OAuth2LoginConfigCustomWithBeanRegistration.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest(); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(2); assertThat(authentication.getAuthorities()).first().hasToString("ROLE_USER"); assertThat(authentication.getAuthorities()).last().hasToString("ROLE_OAUTH2_USER"); } @Test public void oauth2LoginCustomWithUserServiceBeanRegistration() throws Exception { // setup application context loadConfig(OAuth2LoginConfigCustomUserServiceBeanRegistration.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest(); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository 
.loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(2); assertThat(authentication.getAuthorities()).first().hasToString("ROLE_USER"); assertThat(authentication.getAuthorities()).last().hasToString("ROLE_OAUTH2_USER"); } // gh-5488 @Test public void oauth2LoginConfigLoginProcessingUrl() throws Exception { // setup application context loadConfig(OAuth2LoginConfigLoginProcessingUrl.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest(); this.request.setServletPath("/login/oauth2/google"); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(1); assertThat(authentication.getAuthorities()).first().isInstanceOf(OAuth2UserAuthority.class) .hasToString("ROLE_USER"); } // gh-5521 @Test public void oauth2LoginWithCustomAuthorizationRequestParameters() throws Exception { loadConfig(OAuth2LoginConfigCustomAuthorizationRequestResolver.class); OAuth2AuthorizationRequestResolver resolver = this.context .getBean(OAuth2LoginConfigCustomAuthorizationRequestResolver.class).resolver; // @formatter:off OAuth2AuthorizationRequest result = OAuth2AuthorizationRequest.authorizationCode() .authorizationUri("https://accounts.google.com/authorize") .clientId("client-id") .state("adsfa") .authorizationRequestUri( "https://accounts.google.com/o/oauth2/v2/auth?response_type=code&client_id=clientId&scope=openid+profile+email&state=state&redirect_uri=http%3A%2F%2Flocalhost%2Flogin%2Foauth2%2Fcode%2Fgoogle&custom-param1=custom-value1") .build(); // @formatter:on given(resolver.resolve(any())).willReturn(result); String requestUri = "/oauth2/authorization/google"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).isEqualTo( "https://accounts.google.com/o/oauth2/v2/auth?response_type=code&client_id=clientId&scope=openid+profile+email&state=state&redirect_uri=http%3A%2F%2Flocalhost%2Flogin%2Foauth2%2Fcode%2Fgoogle&custom-param1=custom-value1"); } @Test public void requestWhenOauth2LoginWithCustomAuthorizationRequestParametersThenParametersInRedirectedUrl() throws Exception { loadConfig(OAuth2LoginConfigCustomAuthorizationRequestResolverInLambda.class); OAuth2AuthorizationRequestResolver resolver = this.context .getBean(OAuth2LoginConfigCustomAuthorizationRequestResolverInLambda.class).resolver; // @formatter:off OAuth2AuthorizationRequest result = OAuth2AuthorizationRequest.authorizationCode() .authorizationUri("https://accounts.google.com/authorize") .clientId("client-id") .state("adsfa") .authorizationRequestUri( "https://accounts.google.com/o/oauth2/v2/auth?response_type=code&client_id=clientId&scope=openid+profile+email&state=state&redirect_uri=http%3A%2F%2Flocalhost%2Flogin%2Foauth2%2Fcode%2Fgoogle&custom-param1=custom-value1") .build(); // 
@formatter:on given(resolver.resolve(any())).willReturn(result); String requestUri = "/oauth2/authorization/google"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).isEqualTo( "https://accounts.google.com/o/oauth2/v2/auth?response_type=code&client_id=clientId&scope=openid+profile+email&state=state&redirect_uri=http%3A%2F%2Flocalhost%2Flogin%2Foauth2%2Fcode%2Fgoogle&custom-param1=custom-value1"); } // gh-5347 @Test public void oauth2LoginWithOneClientConfiguredThenRedirectForAuthorization() throws Exception { loadConfig(OAuth2LoginConfig.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).matches("http://localhost/oauth2/authorization/google"); } // gh-5347 @Test public void oauth2LoginWithOneClientConfiguredAndRequestFaviconNotAuthenticatedThenRedirectDefaultLoginPage() throws Exception { loadConfig(OAuth2LoginConfig.class); String requestUri = "/favicon.ico"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.request.addHeader(HttpHeaders.ACCEPT, new MediaType("image", "*").toString()); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).matches("http://localhost/login"); } // gh-5347 @Test public void oauth2LoginWithMultipleClientsConfiguredThenRedirectDefaultLoginPage() throws Exception { loadConfig(OAuth2LoginConfigMultipleClients.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).matches("http://localhost/login"); } // gh-6812 @Test public void oauth2LoginWithOneClientConfiguredAndRequestXHRNotAuthenticatedThenDoesNotRedirectForAuthorization() throws Exception { loadConfig(OAuth2LoginConfig.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.request.addHeader("X-Requested-With", "XMLHttpRequest"); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).doesNotMatch("http://localhost/oauth2/authorization/google"); } @Test public void oauth2LoginWithHttpBasicOneClientConfiguredAndRequestXHRNotAuthenticatedThenUnauthorized() throws Exception { loadConfig(OAuth2LoginWithHttpBasicConfig.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.request.addHeader("X-Requested-With", "XMLHttpRequest"); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getStatus()).isEqualTo(401); } @Test public void oauth2LoginWithXHREntryPointOneClientConfiguredAndRequestXHRNotAuthenticatedThenUnauthorized() throws Exception { loadConfig(OAuth2LoginWithXHREntryPointConfig.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.request.addHeader("X-Requested-With", 
"XMLHttpRequest"); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getStatus()).isEqualTo(401); } // gh-9457 @Test public void oauth2LoginWithOneAuthorizationCodeClientAndOtherClientsConfiguredThenRedirectForAuthorization() throws Exception { loadConfig(OAuth2LoginConfigAuthorizationCodeClientAndOtherClients.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).matches("http://localhost/oauth2/authorization/google"); } @Test public void oauth2LoginWithCustomLoginPageThenRedirectCustomLoginPage() throws Exception { loadConfig(OAuth2LoginConfigCustomLoginPage.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).matches("http://localhost/custom-login"); } @Test public void requestWhenOauth2LoginWithCustomLoginPageInLambdaThenRedirectCustomLoginPage() throws Exception { loadConfig(OAuth2LoginConfigCustomLoginPageInLambda.class); String requestUri = "/"; this.request = new MockHttpServletRequest("GET", requestUri); this.request.setServletPath(requestUri); this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); assertThat(this.response.getRedirectedUrl()).matches("http://localhost/custom-login"); } @Test public void oidcLogin() throws Exception { // setup application context loadConfig(OAuth2LoginConfig.class, JwtDecoderFactoryConfig.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest("openid"); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(1); assertThat(authentication.getAuthorities()).first().isInstanceOf(OidcUserAuthority.class) .hasToString("ROLE_USER"); } @Test public void requestWhenOauth2LoginInLambdaAndOidcThenAuthenticationContainsOidcUserAuthority() throws Exception { // setup application context loadConfig(OAuth2LoginInLambdaConfig.class, JwtDecoderFactoryConfig.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest("openid"); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); 
assertThat(authentication.getAuthorities()).hasSize(1); assertThat(authentication.getAuthorities()).first().isInstanceOf(OidcUserAuthority.class) .hasToString("ROLE_USER"); } @Test public void oidcLoginCustomWithConfigurer() throws Exception { // setup application context loadConfig(OAuth2LoginConfigCustomWithConfigurer.class, JwtDecoderFactoryConfig.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest("openid"); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(2); assertThat(authentication.getAuthorities()).first().hasToString("ROLE_USER"); assertThat(authentication.getAuthorities()).last().hasToString("ROLE_OIDC_USER"); } @Test public void oidcLoginCustomWithBeanRegistration() throws Exception { // setup application context loadConfig(OAuth2LoginConfigCustomWithBeanRegistration.class, JwtDecoderFactoryConfig.class); // setup authorization request OAuth2AuthorizationRequest authorizationRequest = createOAuth2AuthorizationRequest("openid"); this.authorizationRequestRepository.saveAuthorizationRequest(authorizationRequest, this.request, this.response); // setup authentication parameters this.request.setParameter("code", "code123"); this.request.setParameter("state", authorizationRequest.getState()); // perform test this.springSecurityFilterChain.doFilter(this.request, this.response, this.filterChain); // assertions Authentication authentication = this.securityContextRepository .loadContext(new HttpRequestResponseHolder(this.request, this.response)).getAuthentication(); assertThat(authentication.getAuthorities()).hasSize(2); assertThat(authentication.getAuthorities()).first().hasToString("ROLE_USER"); assertThat(authentication.getAuthorities()).last().hasToString("ROLE_OIDC_USER"); } @Test public void oidcLoginCustomWithNoUniqueJwtDecoderFactory() { assertThatExceptionOfType(BeanCreationException.class) .isThrownBy(() -> loadConfig(OAuth2LoginConfig.class, NoUniqueJwtDecoderFactoryConfig.class)) .withRootCauseInstanceOf(NoUniqueBeanDefinitionException.class) .withMessageContaining("No qualifying bean of type " + "'org.springframework.security.oauth2.jwt.JwtDecoderFactory<org.springframework.security.oauth2.client.registration.ClientRegistration>' " + "available: expected single matching bean but found 2: jwtDecoderFactory1,jwtDecoderFactory2"); } @Test public void logoutWhenUsingOidcLogoutHandlerThenRedirects() throws Exception { this.spring.register(OAuth2LoginConfigWithOidcLogoutSuccessHandler.class).autowire(); OAuth2AuthenticationToken token = new OAuth2AuthenticationToken(TestOidcUsers.create(), AuthorityUtils.NO_AUTHORITIES, "registration-id"); this.mvc.perform(post("/logout").with(authentication(token)).with(csrf())) .andExpect(redirectedUrl("https://logout?id_token_hint=id-token")); } private void loadConfig(Class<?>... 
configs) { AnnotationConfigWebApplicationContext applicationContext = new AnnotationConfigWebApplicationContext(); applicationContext.register(configs); applicationContext.refresh(); applicationContext.getAutowireCapableBeanFactory().autowireBean(this); this.context = applicationContext; } private OAuth2AuthorizationRequest createOAuth2AuthorizationRequest(String... scopes) { return this.createOAuth2AuthorizationRequest(GOOGLE_CLIENT_REGISTRATION, scopes); } private OAuth2AuthorizationRequest createOAuth2AuthorizationRequest(ClientRegistration registration, String... scopes) { // @formatter:off return OAuth2AuthorizationRequest.authorizationCode() .authorizationUri(registration.getProviderDetails().getAuthorizationUri()) .clientId(registration.getClientId()) .state("state123") .redirectUri("http://localhost") .attributes(Collections.singletonMap(OAuth2ParameterNames.REGISTRATION_ID, registration.getRegistrationId())) .scope(scopes) .build(); // @formatter:on } private static OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> createOauth2AccessTokenResponseClient() { return (request) -> { Map<String, Object> additionalParameters = new HashMap<>(); if (request.getAuthorizationExchange().getAuthorizationRequest().getScopes().contains("openid")) { additionalParameters.put(OidcParameterNames.ID_TOKEN, "token123"); } return OAuth2AccessTokenResponse.withToken("accessToken123").tokenType(OAuth2AccessToken.TokenType.BEARER) .additionalParameters(additionalParameters).build(); }; } private static OAuth2UserService<OAuth2UserRequest, OAuth2User> createOauth2UserService() { Map<String, Object> userAttributes = Collections.singletonMap("name", "spring"); return (request) -> new DefaultOAuth2User(Collections.singleton(new OAuth2UserAuthority(userAttributes)), userAttributes, "name"); } private static OAuth2UserService<OidcUserRequest, OidcUser> createOidcUserService() { OidcIdToken idToken = TestOidcIdTokens.idToken().build(); return (request) -> new DefaultOidcUser(Collections.singleton(new OidcUserAuthority(idToken)), idToken); } private static GrantedAuthoritiesMapper createGrantedAuthoritiesMapper() { return (authorities) -> { boolean isOidc = OidcUserAuthority.class.isInstance(authorities.iterator().next()); List<GrantedAuthority> mappedAuthorities = new ArrayList<>(authorities); mappedAuthorities.add(new SimpleGrantedAuthority(isOidc ? 
"ROLE_OIDC_USER" : "ROLE_OAUTH2_USER")); return mappedAuthorities; }; } @EnableWebSecurity static class OAuth2LoginConfig extends CommonWebSecurityConfigurerAdapter implements ApplicationListener<AuthenticationSuccessEvent> { static List<AuthenticationSuccessEvent> EVENTS = new ArrayList<>(); @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)); // @formatter:on super.configure(http); } @Override public void onApplicationEvent(AuthenticationSuccessEvent event) { EVENTS.add(event); } } @EnableWebSecurity static class OAuth2LoginInLambdaConfig extends CommonLambdaWebSecurityConfigurerAdapter implements ApplicationListener<AuthenticationSuccessEvent> { static List<AuthenticationSuccessEvent> EVENTS = new ArrayList<>(); @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login((oauth2Login) -> oauth2Login .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)) ); // @formatter:on super.configure(http); } @Override public void onApplicationEvent(AuthenticationSuccessEvent event) { EVENTS.add(event); } } @EnableWebSecurity static class OAuth2LoginConfigCustomWithConfigurer extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)) .userInfoEndpoint() .userAuthoritiesMapper(createGrantedAuthoritiesMapper()); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigCustomWithBeanRegistration extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login(); // @formatter:on super.configure(http); } @Bean ClientRegistrationRepository clientRegistrationRepository() { return new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION); } @Bean GrantedAuthoritiesMapper grantedAuthoritiesMapper() { return createGrantedAuthoritiesMapper(); } } @EnableWebSecurity static class OAuth2LoginConfigCustomUserServiceBeanRegistration extends WebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .authorizeRequests() .anyRequest().authenticated() .and() .securityContext() .securityContextRepository(securityContextRepository()) .and() .oauth2Login() .tokenEndpoint() .accessTokenResponseClient(createOauth2AccessTokenResponseClient()); // @formatter:on } @Bean ClientRegistrationRepository clientRegistrationRepository() { return new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION); } @Bean GrantedAuthoritiesMapper grantedAuthoritiesMapper() { return createGrantedAuthoritiesMapper(); } @Bean SecurityContextRepository securityContextRepository() { return new HttpSessionSecurityContextRepository(); } @Bean HttpSessionOAuth2AuthorizationRequestRepository oauth2AuthorizationRequestRepository() { return new HttpSessionOAuth2AuthorizationRequestRepository(); } @Bean OAuth2UserService<OAuth2UserRequest, OAuth2User> oauth2UserService() { return createOauth2UserService(); } @Bean OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService() { return createOidcUserService(); } } @EnableWebSecurity static class OAuth2LoginConfigLoginProcessingUrl extends 
CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)) .loginProcessingUrl("/login/oauth2/*"); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigCustomAuthorizationRequestResolver extends CommonWebSecurityConfigurerAdapter { private ClientRegistrationRepository clientRegistrationRepository = new InMemoryClientRegistrationRepository( GOOGLE_CLIENT_REGISTRATION); OAuth2AuthorizationRequestResolver resolver = mock(OAuth2AuthorizationRequestResolver.class); @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository(this.clientRegistrationRepository) .authorizationEndpoint() .authorizationRequestResolver(this.resolver); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigCustomAuthorizationRequestResolverInLambda extends CommonLambdaWebSecurityConfigurerAdapter { private ClientRegistrationRepository clientRegistrationRepository = new InMemoryClientRegistrationRepository( GOOGLE_CLIENT_REGISTRATION); OAuth2AuthorizationRequestResolver resolver = mock(OAuth2AuthorizationRequestResolver.class); @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login((oauth2Login) -> oauth2Login .clientRegistrationRepository(this.clientRegistrationRepository) .authorizationEndpoint((authorizationEndpoint) -> authorizationEndpoint .authorizationRequestResolver(this.resolver) ) ); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigMultipleClients extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository( GOOGLE_CLIENT_REGISTRATION, GITHUB_CLIENT_REGISTRATION)); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigAuthorizationCodeClientAndOtherClients extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository( GOOGLE_CLIENT_REGISTRATION, CLIENT_CREDENTIALS_REGISTRATION)); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigCustomLoginPage extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)) .loginPage("/custom-login"); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigCustomLoginPageInLambda extends CommonLambdaWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login((oauth2Login) -> oauth2Login .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)) .loginPage("/custom-login") ); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginConfigWithOidcLogoutSuccessHandler extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity 
http) throws Exception { // @formatter:off http .logout() .logoutSuccessHandler(oidcLogoutSuccessHandler()); // @formatter:on super.configure(http); } @Bean OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler() { return new OidcClientInitiatedLogoutSuccessHandler(clientRegistrationRepository()); } @Bean ClientRegistrationRepository clientRegistrationRepository() { Map<String, Object> providerMetadata = Collections.singletonMap("end_session_endpoint", "https://logout"); return new InMemoryClientRegistrationRepository(TestClientRegistrations.clientRegistration() .providerConfigurationMetadata(providerMetadata).build()); } } @EnableWebSecurity static class OAuth2LoginWithHttpBasicConfig extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)) .and() .httpBasic(); // @formatter:on super.configure(http); } } @EnableWebSecurity static class OAuth2LoginWithXHREntryPointConfig extends CommonWebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .oauth2Login() .clientRegistrationRepository( new InMemoryClientRegistrationRepository(GOOGLE_CLIENT_REGISTRATION)) .and() .exceptionHandling() .defaultAuthenticationEntryPointFor( new HttpStatusEntryPoint(HttpStatus.UNAUTHORIZED), new RequestHeaderRequestMatcher("X-Requested-With", "XMLHttpRequest")); // @formatter:on super.configure(http); } } private abstract static class CommonWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .authorizeRequests() .anyRequest().authenticated() .and() .securityContext() .securityContextRepository(securityContextRepository()) .and() .oauth2Login() .tokenEndpoint() .accessTokenResponseClient(createOauth2AccessTokenResponseClient()) .and() .userInfoEndpoint() .userService(createOauth2UserService()) .oidcUserService(createOidcUserService()); // @formatter:on } @Bean SecurityContextRepository securityContextRepository() { return new HttpSessionSecurityContextRepository(); } @Bean HttpSessionOAuth2AuthorizationRequestRepository oauth2AuthorizationRequestRepository() { return new HttpSessionOAuth2AuthorizationRequestRepository(); } } private abstract static class CommonLambdaWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter { @Override protected void configure(HttpSecurity http) throws Exception { // @formatter:off http .authorizeRequests((authorizeRequests) -> authorizeRequests .anyRequest().authenticated() ) .securityContext((securityContext) -> securityContext .securityContextRepository(securityContextRepository()) ) .oauth2Login((oauth2Login) -> oauth2Login .tokenEndpoint((tokenEndpoint) -> tokenEndpoint .accessTokenResponseClient(createOauth2AccessTokenResponseClient()) ) .userInfoEndpoint((userInfoEndpoint) -> userInfoEndpoint .userService(createOauth2UserService()) .oidcUserService(createOidcUserService()) ) ); // @formatter:on } @Bean SecurityContextRepository securityContextRepository() { return new HttpSessionSecurityContextRepository(); } @Bean HttpSessionOAuth2AuthorizationRequestRepository oauth2AuthorizationRequestRepository() { return new HttpSessionOAuth2AuthorizationRequestRepository(); } } @Configuration static class JwtDecoderFactoryConfig { @Bean JwtDecoderFactory<ClientRegistration> 
jwtDecoderFactory() { return (clientRegistration) -> getJwtDecoder(); } private static JwtDecoder getJwtDecoder() { Map<String, Object> claims = new HashMap<>(); claims.put(IdTokenClaimNames.SUB, "sub123"); claims.put(IdTokenClaimNames.ISS, "http://localhost/iss"); claims.put(IdTokenClaimNames.AUD, Arrays.asList("clientId", "a", "u", "d")); claims.put(IdTokenClaimNames.AZP, "clientId"); Jwt jwt = TestJwts.jwt().claims((c) -> c.putAll(claims)).build(); JwtDecoder jwtDecoder = mock(JwtDecoder.class); given(jwtDecoder.decode(any())).willReturn(jwt); return jwtDecoder; } } @Configuration static class NoUniqueJwtDecoderFactoryConfig { @Bean JwtDecoderFactory<ClientRegistration> jwtDecoderFactory1() { return (clientRegistration) -> JwtDecoderFactoryConfig.getJwtDecoder(); } @Bean JwtDecoderFactory<ClientRegistration> jwtDecoderFactory2() { return (clientRegistration) -> JwtDecoderFactoryConfig.getJwtDecoder(); } } }
1
17,542
Please move this test method just below `oauth2LoginWithOneClientConfiguredThenRedirectForAuthorization()`
spring-projects-spring-security
java
@@ -270,6 +270,9 @@ bool LatencyTestPublisher::init(int n_sub, int n_sam, bool reliable, uint32_t pi PubDataparam.topic.topicName = pt.str(); PubDataparam.times.heartbeatPeriod.seconds = 0; PubDataparam.times.heartbeatPeriod.fraction = 4294967 * 100; + PubDataparam.qos.m_liveliness.lease_duration = c_TimeInfinite; + PubDataparam.qos.m_liveliness.announcement_period = Duration_t(1, 0); + if (!reliable) { PubDataparam.qos.m_reliability.kind = BEST_EFFORT_RELIABILITY_QOS;
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file LatencyPublisher.cpp * */ #include "LatencyTestPublisher.h" #include "fastrtps/log/Log.h" #include "fastrtps/log/Colors.h" #include <fastrtps/xmlparser/XMLProfileManager.h> #include <numeric> #include <cmath> #include <fstream> #include <inttypes.h> #define TIME_LIMIT_US 10000 using namespace eprosima; using namespace eprosima::fastrtps; using namespace eprosima::fastrtps::rtps; uint32_t dataspub[] = {12, 28, 60, 124, 252, 508, 1020, 2044, 4092, 8188, 16380}; uint32_t dataspub_large[] = {63996, 131068}; std::vector<uint32_t> data_size_pub; LatencyTestPublisher::LatencyTestPublisher(): mp_participant(nullptr), mp_datapub(nullptr), mp_commandpub(nullptr), mp_datasub(nullptr), mp_commandsub(nullptr), t_overhead_(0.0), n_subscribers(0), n_samples(0), disc_count_(0), comm_count_(0), data_count_(0), m_status(0), n_received(0), m_datapublistener(nullptr), m_datasublistener(nullptr), m_commandpublistener(nullptr), m_commandsublistener(nullptr), mp_latency_in(nullptr), mp_latency_out(nullptr), m_DynData_in(nullptr), m_DynData_out(nullptr) { m_forcedDomain = -1; m_datapublistener.mp_up = this; m_datasublistener.mp_up = this; m_commandpublistener.mp_up = this; m_commandsublistener.mp_up = this; m_exportPrefix = ""; } LatencyTestPublisher::~LatencyTestPublisher() { Domain::removeParticipant(mp_participant); } bool LatencyTestPublisher::init(int n_sub, int n_sam, bool reliable, uint32_t pid, bool hostname, bool export_csv, const std::string& export_prefix, const PropertyPolicy& part_property_policy, const PropertyPolicy& property_policy, bool large_data, const std::string& sXMLConfigFile, bool dynamic_types, int forced_domain) { m_sXMLConfigFile = sXMLConfigFile; n_samples = n_sam; n_subscribers = n_sub; n_export_csv = export_csv; m_exportPrefix = export_prefix; reliable_ = reliable; dynamic_data = dynamic_types; m_forcedDomain = forced_domain; if(!large_data) { data_size_pub.assign(dataspub, dataspub + sizeof(dataspub) / sizeof(uint32_t) ); } else { data_size_pub.assign(dataspub_large, dataspub_large + sizeof(dataspub_large) / sizeof(uint32_t) ); } if (dynamic_data) { // Create basic builders DynamicTypeBuilder_ptr struct_type_builder(DynamicTypeBuilderFactory::GetInstance()->CreateStructBuilder()); // Add members to the struct. 
struct_type_builder->AddMember(0, "seqnum", DynamicTypeBuilderFactory::GetInstance()->CreateUint32Type()); struct_type_builder->AddMember(1, "data", DynamicTypeBuilderFactory::GetInstance()->CreateSequenceBuilder( DynamicTypeBuilderFactory::GetInstance()->CreateByteType(), data_size_pub.back() )); struct_type_builder->SetName("LatencyType"); m_pDynType = struct_type_builder->Build(); m_DynType.SetDynamicType(m_pDynType); } ////////////////////////////// /* char date_buffer[9]; char time_buffer[7]; time_t t = time(0); // get time now struct tm * now = localtime(&t); strftime(date_buffer, 9, "%Y%m%d", now); strftime(time_buffer, 7, "%H%M%S", now); */ for (std::vector<uint32_t>::iterator it = data_size_pub.begin(); it != data_size_pub.end(); ++it) { output_file_minimum << "\"" << n_samples << " samples of " << *it + 4 << " bytes (us)\""; output_file_average << "\"" << n_samples << " samples of " << *it + 4 << " bytes (us)\""; if (it != data_size_pub.end() - 1) { output_file_minimum << ","; output_file_average << ","; } std::string str_reliable = "besteffort"; if(reliable_) { str_reliable = "reliable"; } switch (*it + 4) { case 16: output_file_16 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_16 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 32: output_file_32 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_32 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 64: output_file_64 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_64 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 128: output_file_128 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_128 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 256: output_file_256 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_256 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 512: output_file_512 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_512 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 1024: output_file_1024 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_1024 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 2048: output_file_2048 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_2048 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 4096: output_file_4096 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_4096 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 8192: output_file_8192 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_8192 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 16384: output_file_16384 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_16384 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 64000: output_file_64000 << "\"Minimum of " << n_samples << " samples (" << 
str_reliable << ")\","; output_file_64000 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; case 131072: output_file_131072 << "\"Minimum of " << n_samples << " samples (" << str_reliable << ")\","; output_file_131072 << "\"Average of " << n_samples << " samples (" << str_reliable << ")\"" << std::endl; break; default: break; } } output_file_minimum << std::endl; output_file_average << std::endl; ////////////////////////////// // Create RTPSParticipant std::string participant_profile_name = "participant_profile"; ParticipantAttributes PParam; if (m_forcedDomain >= 0) { PParam.rtps.builtin.domainId = m_forcedDomain; } else { PParam.rtps.builtin.domainId = pid % 230; } PParam.rtps.properties = part_property_policy; PParam.rtps.setName("Participant_pub"); if (m_sXMLConfigFile.length() > 0) { if (m_forcedDomain >= 0) { ParticipantAttributes participant_att; if (eprosima::fastrtps::xmlparser::XMLP_ret::XML_OK == eprosima::fastrtps::xmlparser::XMLProfileManager::fillParticipantAttributes(participant_profile_name, participant_att)) { participant_att.rtps.builtin.domainId = m_forcedDomain; mp_participant = Domain::createParticipant(participant_att); } } else { mp_participant = Domain::createParticipant(participant_profile_name); } } else { mp_participant = Domain::createParticipant(PParam); } if (mp_participant == nullptr) { return false; } // Register the type if (dynamic_data) { Domain::registerType(mp_participant, &m_DynType); } else { Domain::registerType(mp_participant, (TopicDataType*)&latency_t); } Domain::registerType(mp_participant, (TopicDataType*)&command_t); // Create Data Publisher std::string profile_name = "publisher_profile"; PublisherAttributes PubDataparam; PubDataparam.topic.topicDataType = "LatencyType"; PubDataparam.topic.topicKind = NO_KEY; std::ostringstream pt; pt << "LatencyTest_"; if (hostname) pt << asio::ip::host_name() << "_"; pt << pid << "_PUB2SUB"; PubDataparam.topic.topicName = pt.str(); PubDataparam.times.heartbeatPeriod.seconds = 0; PubDataparam.times.heartbeatPeriod.fraction = 4294967 * 100; if (!reliable) { PubDataparam.qos.m_reliability.kind = BEST_EFFORT_RELIABILITY_QOS; } PubDataparam.properties = property_policy; if (large_data) { PubDataparam.historyMemoryPolicy = eprosima::fastrtps::rtps::PREALLOCATED_WITH_REALLOC_MEMORY_MODE; PubDataparam.qos.m_publishMode.kind = eprosima::fastrtps::ASYNCHRONOUS_PUBLISH_MODE; } if (m_sXMLConfigFile.length() > 0) { mp_datapub = Domain::createPublisher(mp_participant, profile_name, (PublisherListener*)&this->m_datapublistener); } else { mp_datapub = Domain::createPublisher(mp_participant, PubDataparam, (PublisherListener*)&this->m_datapublistener); } if (mp_datapub == nullptr) { return false; } // Create Echo Subscriber profile_name = "subscriber_profile"; SubscriberAttributes SubDataparam; SubDataparam.topic.topicDataType = "LatencyType"; SubDataparam.topic.topicKind = NO_KEY; std::ostringstream st; st << "LatencyTest_"; if (hostname) st << asio::ip::host_name() << "_"; st << pid << "_SUB2PUB"; SubDataparam.topic.topicName = st.str(); if (reliable) { SubDataparam.qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; } SubDataparam.properties = property_policy; if (large_data) { SubDataparam.historyMemoryPolicy = eprosima::fastrtps::rtps::PREALLOCATED_WITH_REALLOC_MEMORY_MODE; } if (m_sXMLConfigFile.length() > 0) { mp_datasub = Domain::createSubscriber(mp_participant, profile_name, &this->m_datasublistener); } else { mp_datasub = Domain::createSubscriber(mp_participant, 
SubDataparam, &this->m_datasublistener); } if (mp_datasub == nullptr) { return false; } //COMMAND PUBLISHER PublisherAttributes PubCommandParam; PubCommandParam.topic.topicDataType = "TestCommandType"; PubCommandParam.topic.topicKind = NO_KEY; std::ostringstream pct; pct << "LatencyTest_Command_"; if (hostname) pct << asio::ip::host_name() << "_"; pct << pid << "_PUB2SUB"; PubCommandParam.topic.topicName = pct.str(); PubCommandParam.topic.historyQos.kind = KEEP_ALL_HISTORY_QOS; PubCommandParam.qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS; mp_commandpub = Domain::createPublisher(mp_participant, PubCommandParam, &this->m_commandpublistener); if (mp_commandpub == nullptr) { return false; } SubscriberAttributes SubCommandParam; SubCommandParam.topic.topicDataType = "TestCommandType"; SubCommandParam.topic.topicKind = NO_KEY; std::ostringstream sct; sct << "LatencyTest_Command_"; if (hostname) sct << asio::ip::host_name() << "_"; sct << pid << "_SUB2PUB"; SubCommandParam.topic.topicName = sct.str(); SubCommandParam.topic.historyQos.kind = KEEP_ALL_HISTORY_QOS; SubCommandParam.qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; SubCommandParam.qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS; mp_commandsub = Domain::createSubscriber(mp_participant, SubCommandParam, &this->m_commandsublistener); if (mp_commandsub == nullptr) { return false; } // Calculate overhead t_start_ = std::chrono::steady_clock::now(); for (int i = 0; i < 1000; ++i) t_end_ = std::chrono::steady_clock::now(); t_overhead_ = std::chrono::duration<double, std::micro>(t_end_ - t_start_) / 1001; cout << "Overhead " << t_overhead_.count() << " ns" << endl; return true; } void LatencyTestPublisher::DataPubListener::onPublicationMatched(Publisher* /*pub*/, MatchingInfo& info) { std::unique_lock<std::mutex> lock(mp_up->mutex_); if(info.status == MATCHED_MATCHING) { cout << C_MAGENTA << "Data Pub Matched "<<C_DEF<<endl; n_matched++; if(n_matched > mp_up->n_subscribers) { std::cout << "More matched subscribers than expected" << std::endl; mp_up->m_status = -1; } ++mp_up->disc_count_; } else { --mp_up->disc_count_; } lock.unlock(); mp_up->disc_cond_.notify_one(); } void LatencyTestPublisher::DataSubListener::onSubscriptionMatched(Subscriber* /*sub*/,MatchingInfo& info) { std::unique_lock<std::mutex> lock(mp_up->mutex_); if(info.status == MATCHED_MATCHING) { cout << C_MAGENTA << "Data Sub Matched "<<C_DEF<<endl; n_matched++; if(n_matched > mp_up->n_subscribers) { std::cout << "More matched subscribers than expected" << std::endl; mp_up->m_status = -1; } ++mp_up->disc_count_; } else { --mp_up->disc_count_; } lock.unlock(); mp_up->disc_cond_.notify_one(); } void LatencyTestPublisher::CommandPubListener::onPublicationMatched(Publisher* /*pub*/, MatchingInfo& info) { std::unique_lock<std::mutex> lock(mp_up->mutex_); if(info.status == MATCHED_MATCHING) { cout << C_MAGENTA << "Command Pub Matched "<<C_DEF<<endl; n_matched++; if(n_matched > mp_up->n_subscribers) { std::cout << "More matched subscribers than expected" << std::endl; mp_up->m_status = -1; } ++mp_up->disc_count_; } else { --mp_up->disc_count_; } lock.unlock(); mp_up->disc_cond_.notify_one(); } void LatencyTestPublisher::CommandSubListener::onSubscriptionMatched(Subscriber* /*sub*/,MatchingInfo& info) { std::unique_lock<std::mutex> lock(mp_up->mutex_); if(info.status == MATCHED_MATCHING) { cout << C_MAGENTA << "Command Sub Matched "<<C_DEF<<endl; n_matched++; if(n_matched > mp_up->n_subscribers) { std::cout << "More matched subscribers than expected" << std::endl; 
mp_up->m_status = -1; } ++mp_up->disc_count_; } else { --mp_up->disc_count_; } lock.unlock(); mp_up->disc_cond_.notify_one(); } void LatencyTestPublisher::CommandSubListener::onNewDataMessage(Subscriber* subscriber) { TestCommandType command; SampleInfo_t info; // cout << "COMMAND RECEIVED"<<endl; if(subscriber->takeNextData((void*)&command,&info)) { if(info.sampleKind == ALIVE) { //cout << "ALIVE "<<command.m_command<<endl; if(command.m_command == BEGIN) { // cout << "POSTING"<<endl; mp_up->mutex_.lock(); ++mp_up->comm_count_; mp_up->mutex_.unlock(); mp_up->comm_cond_.notify_one(); } } } else cout<< "Problem reading"<<endl; } void LatencyTestPublisher::DataSubListener::onNewDataMessage(Subscriber* subscriber) { if (mp_up->dynamic_data) { subscriber->takeNextData((void*)mp_up->m_DynData_in,&mp_up->m_sampleinfo); if (mp_up->m_DynData_in->GetUint32Value(0) == mp_up->m_DynData_out->GetUint32Value(0)) { mp_up->t_end_ = std::chrono::steady_clock::now(); mp_up->times_.push_back(std::chrono::duration<double, std::micro>(mp_up->t_end_ - mp_up->t_start_) - mp_up->t_overhead_); mp_up->n_received++; // Reset seqnum from out data mp_up->m_DynData_out->SetUint32Value(0, 0); mp_up->mutex_.lock(); if(mp_up->data_count_ == 0) { ++mp_up->data_count_; mp_up->data_cond_.notify_one(); } mp_up->mutex_.unlock(); } } else { subscriber->takeNextData((void*)mp_up->mp_latency_in,&mp_up->m_sampleinfo); if(mp_up->mp_latency_in->seqnum == mp_up->mp_latency_out->seqnum) { mp_up->t_end_ = std::chrono::steady_clock::now(); mp_up->times_.push_back(std::chrono::duration<double, std::micro>(mp_up->t_end_ - mp_up->t_start_) - mp_up->t_overhead_); mp_up->n_received++; // Reset seqnum from out data mp_up->mp_latency_out->seqnum = 0; mp_up->mutex_.lock(); if(mp_up->data_count_ == 0) { ++mp_up->data_count_; mp_up->data_cond_.notify_one(); } mp_up->mutex_.unlock(); } } } void LatencyTestPublisher::run() { //WAIT FOR THE DISCOVERY PROCESS FO FINISH: //EACH SUBSCRIBER NEEDS 4 Matchings (2 publishers and 2 subscribers) std::unique_lock<std::mutex> disc_lock(mutex_); while(disc_count_ != (n_subscribers * 4)) disc_cond_.wait(disc_lock); disc_lock.unlock(); cout << C_B_MAGENTA << "DISCOVERY COMPLETE "<<C_DEF<<endl; printf("Printing round-trip times in us, statistics for %d samples\n",n_samples); printf(" Bytes, Samples, stdev, mean, min, 50%%, 90%%, 99%%, 99.99%%, max\n"); printf("--------,--------,--------,--------,--------,--------,--------,--------,--------,--------,\n"); for(std::vector<uint32_t>::iterator ndata = data_size_pub.begin(); ndata != data_size_pub.end(); ++ndata) { if(!this->test(*ndata)) break; eClock::my_sleep(100); if (ndata != data_size_pub.end() - 1) { output_file_minimum << ","; output_file_average << ","; } } cout << "REMOVING PUBLISHER"<<endl; Domain::removePublisher(this->mp_commandpub); cout << "REMOVING SUBSCRIBER"<<endl; Domain::removeSubscriber(mp_commandsub); std::string str_reliable = "besteffort"; if(reliable_) str_reliable = "reliable"; if (n_export_csv) { std::ofstream outFile; std::string prefix = m_exportPrefix; if (prefix.length() == 0) { prefix = "perf_LatencyTest"; } outFile.open(prefix + "_minimum_" + str_reliable + ".csv"); outFile << output_file_minimum.str(); outFile.close(); outFile.open(prefix + "_average_" + str_reliable + ".csv"); outFile << output_file_average.str(); outFile.close(); outFile.open(prefix + "_16_" + str_reliable + ".csv"); outFile << output_file_16.str(); outFile.close(); outFile.open(prefix + "_32_" + str_reliable + ".csv"); outFile << output_file_32.str(); 
outFile.close(); outFile.open(prefix + "_64_" + str_reliable + ".csv"); outFile << output_file_64.str(); outFile.close(); outFile.open(prefix + "_128_" + str_reliable + ".csv"); outFile << output_file_128.str(); outFile.close(); outFile.open(prefix + "_256_" + str_reliable + ".csv"); outFile << output_file_256.str(); outFile.close(); outFile.open(prefix + "_512_" + str_reliable + ".csv"); outFile << output_file_512.str(); outFile.close(); outFile.open(prefix + "_1024_" + str_reliable + ".csv"); outFile << output_file_1024.str(); outFile.close(); outFile.open(prefix + "_2048_" + str_reliable + ".csv"); outFile << output_file_2048.str(); outFile.close(); outFile.open(prefix + "_4096_" + str_reliable + ".csv"); outFile << output_file_4096.str(); outFile.close(); outFile.open(prefix + "_8192_" + str_reliable + ".csv"); outFile << output_file_8192.str(); outFile.close(); outFile.open(prefix + "_16384_" + str_reliable + ".csv"); outFile << output_file_16384.str(); outFile.close(); } } bool LatencyTestPublisher::test(uint32_t datasize) { //cout << "Beginning test of size: "<<datasize+4 <<endl; m_status = 0; n_received = 0; if (dynamic_data) { m_DynData_in = DynamicDataFactory::GetInstance()->CreateData(m_pDynType); m_DynData_out = DynamicDataFactory::GetInstance()->CreateData(m_pDynType); MemberId id_in, id_out; DynamicData *my_data_in = m_DynData_in->LoanValue(m_DynData_in->GetMemberIdAtIndex(1)); DynamicData *my_data_out = m_DynData_out->LoanValue(m_DynData_out->GetMemberIdAtIndex(1)); for (uint32_t i = 0; i < datasize; ++i) { my_data_in->InsertSequenceData(id_in); my_data_in->SetByteValue(0, id_in); my_data_out->InsertSequenceData(id_out); my_data_out->SetByteValue(0, id_out); } m_DynData_in->ReturnLoanedValue(my_data_in); m_DynData_out->ReturnLoanedValue(my_data_out); } else { mp_latency_in = new LatencyType(datasize); mp_latency_out = new LatencyType(datasize); } times_.clear(); TestCommandType command; command.m_command = READY; mp_commandpub->write(&command); //cout << "WAITING FOR COMMAND RESPONSES "<<endl;; std::unique_lock<std::mutex> lock(mutex_); comm_cond_.wait(lock, [&]() { return comm_count_ >= n_subscribers; }); comm_count_ = 0; lock.unlock(); //cout << endl; //BEGIN THE TEST: for(unsigned int count = 1; count <= n_samples; ++count) { if (dynamic_data) { m_DynData_in->SetUint32Value(0, 0); m_DynData_out->SetUint32Value(count, 0); t_start_ = std::chrono::steady_clock::now(); mp_datapub->write((void*)m_DynData_out); } else { mp_latency_in->seqnum = 0; mp_latency_out->seqnum = count; t_start_ = std::chrono::steady_clock::now(); mp_datapub->write((void*)mp_latency_out); } lock.lock(); data_cond_.wait_for(lock, std::chrono::seconds(1), [&]() { return data_count_ > 0; }); data_count_ = 0; lock.unlock(); } command.m_command = STOP; mp_commandpub->write(&command); if(m_status !=0) { cout << "Error in test "<<endl; return false; } //TEST FINISHED: size_t removed=0; mp_datapub->removeAllChange(&removed); //cout << " REMOVED: "<< removed<<endl; analyzeTimes(datasize); printStat(m_stats.back()); if (dynamic_data) { DynamicDataFactory::GetInstance()->DeleteData(m_DynData_in); DynamicDataFactory::GetInstance()->DeleteData(m_DynData_out); } else { delete(mp_latency_in); delete(mp_latency_out); } return true; } void LatencyTestPublisher::analyzeTimes(uint32_t datasize) { TimeStats TS; TS.nbytes = datasize+4; TS.received = n_received; TS.m_min = *std::min_element(times_.begin(), times_.end()); TS.m_max = *std::max_element(times_.begin(), times_.end()); TS.mean = std::accumulate(times_.begin(), 
times_.end(), std::chrono::duration<double, std::micro>(0)).count() / times_.size(); double auxstdev=0; for(std::vector<std::chrono::duration<double, std::micro>>::iterator tit = times_.begin(); tit != times_.end(); ++tit) { auxstdev += pow(((*tit).count() - TS.mean), 2); } auxstdev = sqrt(auxstdev / times_.size()); TS.stdev = static_cast<double>(round(auxstdev)); std::sort(times_.begin(), times_.end()); size_t elem = 0; elem = static_cast<size_t>(times_.size() * 0.5); if(elem > 0 && elem <= times_.size()) TS.p50 = times_.at(--elem).count(); else TS.p50 = NAN; elem = static_cast<size_t>(times_.size() * 0.9); if(elem > 0 && elem <= times_.size()) TS.p90 = times_.at(--elem).count(); else TS.p90 = NAN; elem = static_cast<size_t>(times_.size() * 0.99); if(elem > 0 && elem <= times_.size()) TS.p99 = times_.at(--elem).count(); else TS.p99 = NAN; elem = static_cast<size_t>(times_.size() * 0.9999); if(elem > 0 && elem <= times_.size()) TS.p9999 = times_.at(--elem).count(); else TS.p9999 = NAN; m_stats.push_back(TS); } void LatencyTestPublisher::printStat(TimeStats& TS) { output_file_minimum << "\"" << TS.m_min.count() << "\""; output_file_average << "\"" << TS.mean << "\""; switch (TS.nbytes) { case 16: output_file_16 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 32: output_file_32 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 64: output_file_64 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 128: output_file_128 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 256: output_file_256 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 512: output_file_512 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 1024: output_file_1024 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 2048: output_file_2048 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 4096: output_file_4096 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 8192: output_file_8192 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 16384: output_file_16384 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 64000: output_file_64000 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; case 131072: output_file_131072 << "\"" << TS.m_min.count() << "\",\"" << TS.mean << "\"" << std::endl; break; default: break; } #ifdef _WIN32 printf("%8I64u,%8u,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f \n", TS.nbytes, TS.received, TS.stdev, TS.mean, TS.m_min.count(), TS.p50, TS.p90, TS.p99, TS.p9999, TS.m_max.count()); #else printf("%8" PRIu64 ",%8u,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f,%8.2f \n", TS.nbytes, TS.received, TS.stdev, TS.mean, TS.m_min.count(), TS.p50, TS.p90, TS.p99, TS.p9999, TS.m_max.count()); #endif }
1
13,200
Why this new configuration?
eProsima-Fast-DDS
cpp
@@ -245,8 +245,14 @@ public class IcebergPigInputFormat<T> extends InputFormat<Void, T> { private Object convertPartitionValue(Type type, Object value) { if (type.typeId() == Types.BinaryType.get().typeId()) { - ByteBuffer buffer = (ByteBuffer) value; - return new DataByteArray(buffer.get(new byte[buffer.remaining()]).array()); + ByteBuffer dupe = ((ByteBuffer) value).duplicate(); + if (dupe.hasArray()) { + return new DataByteArray(dupe.array()); + } else { + byte[] bytes = new byte[dupe.remaining()]; + dupe.get(bytes); + return new DataByteArray(bytes); + } } return value;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.pig; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.lang3.SerializationUtils; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.iceberg.CombinedScanTask; import org.apache.iceberg.DataFile; import org.apache.iceberg.FileScanTask; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.Table; import org.apache.iceberg.TableScan; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.hadoop.HadoopInputFile; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.parquet.Parquet; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.types.Types; import org.apache.pig.data.DataByteArray; import org.apache.pig.impl.util.ObjectSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class IcebergPigInputFormat<T> extends InputFormat<Void, T> { private static final Logger LOG = LoggerFactory.getLogger(IcebergPigInputFormat.class); static final String ICEBERG_SCHEMA = "iceberg.schema"; static final String ICEBERG_PROJECTED_FIELDS = "iceberg.projected.fields"; static final String ICEBERG_FILTER_EXPRESSION = "iceberg.filter.expression"; private Table table; private String signature; private List<InputSplit> splits; IcebergPigInputFormat(Table table, String signature) { this.table = table; this.signature = signature; } @Override @SuppressWarnings("unchecked") public List<InputSplit> getSplits(JobContext context) throws IOException { if (splits != null) { LOG.info("Returning cached splits: {}", splits.size()); return splits; } splits = Lists.newArrayList(); TableScan scan = table.newScan(); //Apply Filters Expression filterExpression = (Expression) ObjectSerializer.deserialize(context.getConfiguration().get(scope(ICEBERG_FILTER_EXPRESSION))); LOG.info("[{}]: iceberg filter expressions: {}", signature, filterExpression); if (filterExpression != null) { LOG.info("Filter Expression: {}", filterExpression); scan = scan.filter(filterExpression); } //Wrap in Splits try (CloseableIterable<CombinedScanTask> tasks = scan.planTasks()) { tasks.forEach(scanTask -> 
splits.add(new IcebergSplit(scanTask))); } return splits; } @Override public RecordReader<Void, T> createRecordReader(InputSplit split, TaskAttemptContext context) { return new IcebergRecordReader<>(); } private static class IcebergSplit extends InputSplit implements Writable { private static final String[] ANYWHERE = new String[] { "*" }; private CombinedScanTask task; IcebergSplit(CombinedScanTask task) { this.task = task; } @Override public long getLength() { return task.files().stream().mapToLong(FileScanTask::length).sum(); } @Override public String[] getLocations() { return ANYWHERE; } @Override public void write(DataOutput out) throws IOException { byte[] data = SerializationUtils.serialize(this.task); out.writeInt(data.length); out.write(data); } @Override public void readFields(DataInput in) throws IOException { byte[] data = new byte[in.readInt()]; in.readFully(data); this.task = (CombinedScanTask) SerializationUtils.deserialize(data); } } private String scope(String key) { return key + '.' + signature; } public class IcebergRecordReader<T> extends RecordReader<Void, T> { private TaskAttemptContext context; private Iterator<FileScanTask> tasks; private CloseableIterable reader; private Iterator<T> recordIterator; private T currentRecord; @Override public void initialize(InputSplit split, TaskAttemptContext initContext) throws IOException { this.context = initContext; CombinedScanTask task = ((IcebergSplit) split).task; this.tasks = task.files().iterator(); advance(); } @SuppressWarnings("unchecked") private boolean advance() throws IOException { if (reader != null) { reader.close(); } if (!tasks.hasNext()) { return false; } FileScanTask currentTask = tasks.next(); Schema tableSchema = (Schema) ObjectSerializer.deserialize(context.getConfiguration().get(scope(ICEBERG_SCHEMA))); LOG.debug("[{}]: Task table schema: {}", signature, tableSchema); List<String> projectedFields = (List<String>) ObjectSerializer.deserialize(context.getConfiguration().get(scope(ICEBERG_PROJECTED_FIELDS))); LOG.debug("[{}]: Task projected fields: {}", signature, projectedFields); Schema projectedSchema = projectedFields != null ? 
SchemaUtil.project(tableSchema, projectedFields) : tableSchema; PartitionSpec spec = currentTask.asFileScanTask().spec(); DataFile file = currentTask.file(); InputFile inputFile = HadoopInputFile.fromLocation(file.path(), context.getConfiguration()); Set<Integer> idColumns = spec.identitySourceIds(); // schema needed for the projection and filtering boolean hasJoinedPartitionColumns = !idColumns.isEmpty(); switch (file.format()) { case PARQUET: Map<Integer, Object> partitionValueMap = Maps.newHashMap(); if (hasJoinedPartitionColumns) { Schema readSchema = TypeUtil.selectNot(projectedSchema, idColumns); Schema projectedPartitionSchema = TypeUtil.select(projectedSchema, idColumns); Map<String, Integer> partitionSpecFieldIndexMap = Maps.newHashMap(); for (int i = 0; i < spec.fields().size(); i++) { partitionSpecFieldIndexMap.put(spec.fields().get(i).name(), i); } for (Types.NestedField field : projectedPartitionSchema.columns()) { int partitionIndex = partitionSpecFieldIndexMap.get(field.name()); Object partitionValue = file.partition().get(partitionIndex, Object.class); partitionValueMap.put(field.fieldId(), convertPartitionValue(field.type(), partitionValue)); } reader = Parquet.read(inputFile) .project(readSchema) .split(currentTask.start(), currentTask.length()) .filter(currentTask.residual()) .createReaderFunc( fileSchema -> PigParquetReader.buildReader(fileSchema, projectedSchema, partitionValueMap)) .build(); } else { reader = Parquet.read(inputFile) .project(projectedSchema) .split(currentTask.start(), currentTask.length()) .filter(currentTask.residual()) .createReaderFunc( fileSchema -> PigParquetReader.buildReader(fileSchema, projectedSchema, partitionValueMap)) .build(); } recordIterator = reader.iterator(); break; default: throw new UnsupportedOperationException("Unsupported file format: " + file.format()); } return true; } private Object convertPartitionValue(Type type, Object value) { if (type.typeId() == Types.BinaryType.get().typeId()) { ByteBuffer buffer = (ByteBuffer) value; return new DataByteArray(buffer.get(new byte[buffer.remaining()]).array()); } return value; } @Override public boolean nextKeyValue() throws IOException { if (recordIterator.hasNext()) { currentRecord = recordIterator.next(); return true; } while (advance()) { if (recordIterator.hasNext()) { currentRecord = recordIterator.next(); return true; } } return false; } @Override public Void getCurrentKey() { return null; } @Override public T getCurrentValue() { return currentRecord; } @Override public float getProgress() { return 0; } @Override public void close() { } } }
1
23,088
I don't think that we need to check `hasArray` here. I think the reason why this didn't previously check `hasArray` is that the array passed to `DataByteArray` must start at offset 0 and be valid through the array length, so a copy was needed in almost every case. It may be simpler to change this to use `ByteBuffers.toByteArray` and pass the result to the `DataByteArray` constructor (see the sketch after this entry).
apache-iceberg
java
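A minimal sketch of the reviewer's suggestion above, assuming Iceberg's `org.apache.iceberg.util.ByteBuffers.toByteArray` helper is on the classpath; this is a hypothetical final form of `convertPartitionValue` (inside `IcebergRecordReader`), not the committed change:

```java
import java.nio.ByteBuffer;

import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.ByteBuffers;
import org.apache.pig.data.DataByteArray;

// Sketch of the suggested simplification: ByteBuffers.toByteArray returns a
// zero-offset byte[] holding the buffer's remaining bytes (copying when the
// backing array does not already match), which is exactly what DataByteArray
// needs, so neither a hasArray() branch nor a manual copy is required.
private Object convertPartitionValue(Type type, Object value) {
  if (type.typeId() == Types.BinaryType.get().typeId()) {
    return new DataByteArray(ByteBuffers.toByteArray((ByteBuffer) value));
  }
  return value;
}
```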
@@ -28,15 +28,15 @@ namespace OpenTelemetry.Metrics /// <summary> /// Adds the given value to the bound counter metric. /// </summary> - /// <param name="context">the associated <see cref="SpanContext"/>.</param> + /// <param name="spanReference">the associated <see cref="SpanReference"/>.</param> /// <param name="value">value by which the bound counter metric should be added.</param> - public abstract void Add(in SpanContext context, T value); + public abstract void Add(in SpanReference spanReference, T value); /// <summary> /// Adds the given value to the bound counter metric. /// </summary> - /// <param name="context">the associated <see cref="Baggage"/>.</param> + /// <param name="spanReference">the associated <see cref="Baggage"/>.</param> /// <param name="value">value by which the bound counter metric should be added.</param> - public abstract void Add(in Baggage context, T value); + public abstract void Add(in Baggage spanReference, T value); } }
1
// <copyright file="BoundCounterMetric.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using OpenTelemetry.Trace; namespace OpenTelemetry.Metrics { /// <summary> /// Bound counter metric with the defined <see cref="LabelSet"/>. /// </summary> /// <typeparam name="T">The type of counter. Only long and double are supported now.</typeparam> public abstract class BoundCounterMetric<T> where T : struct { /// <summary> /// Adds the given value to the bound counter metric. /// </summary> /// <param name="context">the associated <see cref="SpanContext"/>.</param> /// <param name="value">value by which the bound counter metric should be added.</param> public abstract void Add(in SpanContext context, T value); /// <summary> /// Adds the given value to the bound counter metric. /// </summary> /// <param name="context">the associated <see cref="Baggage"/>.</param> /// <param name="value">value by which the bound counter metric should be added.</param> public abstract void Add(in Baggage context, T value); } }
1
17,564
`spanReference` -> `baggage` in the `Baggage` overload, so the parameter name matches its type.
open-telemetry-opentelemetry-dotnet
.cs
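For clarity, the second overload after the reviewer's rename would look like this (a sketch based only on the diff above):

/// <summary>
/// Adds the given value to the bound counter metric.
/// </summary>
/// <param name="baggage">the associated <see cref="Baggage"/>.</param>
/// <param name="value">value by which the bound counter metric should be added.</param>
public abstract void Add(in Baggage baggage, T value);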
@@ -32,9 +32,8 @@ module.exports = function(config, auth, storage) { }); }); - router.get('/-/logo', function(req, res) { - res.sendFile(_.get(config, 'web.logo') || `${env.APP_ROOT}/static/logo-sm.png` - ); + router.get('/-/verdaccio/logo', function(req, res) { + res.send(_.get(config, 'web.logo') || '/-/static/logo.png'); }); router.get('/', function(req, res) {
1
'use strict'; const express = require('express'); const Search = require('../../lib/search'); const Middleware = require('./middleware'); const Utils = require('../../lib/utils'); /* eslint new-cap:off */ const router = express.Router(); const _ = require('lodash'); const env = require('../../config/env'); const fs = require('fs'); const template = fs.readFileSync(`${env.DIST_PATH}/index.html`).toString(); module.exports = function(config, auth, storage) { Search.configureStorage(storage); router.use(auth.jwtMiddleware()); router.use(Middleware.securityIframe); // Static router.get('/-/static/:filename', function(req, res, next) { const file = `${env.APP_ROOT}/static/${req.params.filename}`; res.sendFile(file, function(err) { if (!err) { return; } if (err.status === 404) { next(); } else { next(err); } }); }); router.get('/-/logo', function(req, res) { res.sendFile(_.get(config, 'web.logo') || `${env.APP_ROOT}/static/logo-sm.png` ); }); router.get('/', function(req, res) { const base = Utils.combineBaseUrl(Utils.getWebProtocol(req), req.get('host'), config.url_prefix); const defaultTitle = 'Verdaccio'; let webPage = template .replace(/ToReplaceByVerdaccio/g, base) .replace(/ToReplaceByTitle/g, _.get(config, 'web.title') ? config.web.title : defaultTitle) .replace(/(main.*\.js|style.*\.css)/g, `${base}/-/static/$1`); res.setHeader('Content-Type', 'text/html'); res.send(webPage); }); return router; };
1
17,217
Why the `/-/verdaccio/` prefix?
verdaccio-verdaccio
js
@@ -478,8 +478,7 @@ func TestAppDeployOpts_pushAddonsTemplateToS3Bucket(t *testing.T) { opts := appDeployOpts{ appDeployVars: appDeployVars{ - AppName: tc.inputApp, - enableAddons: true, + AppName: tc.inputApp, GlobalOpts: &GlobalOpts{ projectName: tc.inputProject, },
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "bytes" "errors" "fmt" "testing" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" climocks "github.com/aws/amazon-ecs-cli-v2/internal/pkg/cli/mocks" ) func TestAppDeployOpts_Validate(t *testing.T) { testCases := map[string]struct { inProjectName string inAppName string inEnvName string mockWs func(m *climocks.MockwsAppReader) mockStore func(m *climocks.MockprojectService) wantedError error }{ "no existing projects": { mockWs: func(m *climocks.MockwsAppReader) {}, mockStore: func(m *climocks.MockprojectService) {}, wantedError: errNoProjectInWorkspace, }, "with workspace error": { inProjectName: "phonetool", inAppName: "frontend", mockWs: func(m *climocks.MockwsAppReader) { m.EXPECT().AppNames().Return(nil, errors.New("some error")) }, mockStore: func(m *climocks.MockprojectService) {}, wantedError: errors.New("list applications in the workspace: some error"), }, "with application not in workspace": { inProjectName: "phonetool", inAppName: "frontend", mockWs: func(m *climocks.MockwsAppReader) { m.EXPECT().AppNames().Return([]string{}, nil) }, mockStore: func(m *climocks.MockprojectService) {}, wantedError: errors.New("application frontend not found in the workspace"), }, "with unknown environment": { inProjectName: "phonetool", inEnvName: "test", mockWs: func(m *climocks.MockwsAppReader) {}, mockStore: func(m *climocks.MockprojectService) { m.EXPECT().GetEnvironment("phonetool", "test"). Return(nil, errors.New("unknown env")) }, wantedError: errors.New("get environment test from metadata store: unknown env"), }, "successful validation": { inProjectName: "phonetool", inAppName: "frontend", inEnvName: "test", mockWs: func(m *climocks.MockwsAppReader) { m.EXPECT().AppNames().Return([]string{"frontend"}, nil) }, mockStore: func(m *climocks.MockprojectService) { m.EXPECT().GetEnvironment("phonetool", "test"). 
Return(&archer.Environment{Name: "test"}, nil) }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { // GIVEN ctrl := gomock.NewController(t) defer ctrl.Finish() mockWs := climocks.NewMockwsAppReader(ctrl) mockStore := climocks.NewMockprojectService(ctrl) tc.mockWs(mockWs) tc.mockStore(mockStore) opts := appDeployOpts{ appDeployVars: appDeployVars{ GlobalOpts: &GlobalOpts{ projectName: tc.inProjectName, }, AppName: tc.inAppName, EnvName: tc.inEnvName, }, workspaceService: mockWs, projectService: mockStore, } // WHEN err := opts.Validate() // THEN if tc.wantedError != nil { require.EqualError(t, err, tc.wantedError.Error()) } else { require.Nil(t, err) } }) } } func TestAppDeployOpts_Ask(t *testing.T) { testCases := map[string]struct { inProjectName string inAppName string inEnvName string inImageTag string mockWs func(m *climocks.MockwsAppReader) mockStore func(m *climocks.MockprojectService) mockPrompt func(m *climocks.Mockprompter) wantedAppName string wantedEnvName string wantedImageTag string wantedError error }{ "no applications in the workspace": { mockWs: func(m *climocks.MockwsAppReader) { m.EXPECT().AppNames().Return([]string{}, nil) }, mockStore: func(m *climocks.MockprojectService) {}, mockPrompt: func(m *climocks.Mockprompter) {}, wantedError: errors.New("no applications found in the workspace"), }, "default to single application": { inEnvName: "test", inImageTag: "latest", mockWs: func(m *climocks.MockwsAppReader) { m.EXPECT().AppNames().Return([]string{"frontend"}, nil) }, mockStore: func(m *climocks.MockprojectService) {}, mockPrompt: func(m *climocks.Mockprompter) {}, wantedAppName: "frontend", wantedEnvName: "test", wantedImageTag: "latest", }, "prompts for application name if there are more than one option": { inEnvName: "test", inImageTag: "latest", mockWs: func(m *climocks.MockwsAppReader) { m.EXPECT().AppNames().Return([]string{"frontend", "webhook"}, nil) }, mockStore: func(m *climocks.MockprojectService) {}, mockPrompt: func(m *climocks.Mockprompter) { m.EXPECT().SelectOne("Select an application", "", []string{"frontend", "webhook"}). 
Return("frontend", nil) }, wantedAppName: "frontend", wantedEnvName: "test", wantedImageTag: "latest", }, "fails to list environments": { inProjectName: "phonetool", inAppName: "frontend", inImageTag: "latest", mockWs: func(m *climocks.MockwsAppReader) {}, mockStore: func(m *climocks.MockprojectService) { m.EXPECT().ListEnvironments("phonetool").Return(nil, errors.New("some error")) }, mockPrompt: func(m *climocks.Mockprompter) { }, wantedError: errors.New("get environments for project phonetool from metadata store: some error"), }, "no existing environments": { inProjectName: "phonetool", inAppName: "frontend", inImageTag: "latest", mockWs: func(m *climocks.MockwsAppReader) {}, mockStore: func(m *climocks.MockprojectService) { m.EXPECT().ListEnvironments("phonetool").Return([]*archer.Environment{}, nil) }, mockPrompt: func(m *climocks.Mockprompter) { }, wantedError: errors.New("no environments found in project phonetool"), }, "defaults to single environment": { inProjectName: "phonetool", inAppName: "frontend", inImageTag: "latest", mockWs: func(m *climocks.MockwsAppReader) {}, mockStore: func(m *climocks.MockprojectService) { m.EXPECT().ListEnvironments("phonetool").Return([]*archer.Environment{ { Name: "test", }, }, nil) }, mockPrompt: func(m *climocks.Mockprompter) { }, wantedAppName: "frontend", wantedEnvName: "test", wantedImageTag: "latest", }, "prompts for environment name if there are more than one option": { inProjectName: "phonetool", inAppName: "frontend", inImageTag: "latest", mockWs: func(m *climocks.MockwsAppReader) {}, mockStore: func(m *climocks.MockprojectService) { m.EXPECT().ListEnvironments("phonetool").Return([]*archer.Environment{ { Name: "test", }, { Name: "prod-iad", }, }, nil) }, mockPrompt: func(m *climocks.Mockprompter) { m.EXPECT().SelectOne("Select an environment", "", []string{"test", "prod-iad"}). 
Return("prod-iad", nil) }, wantedAppName: "frontend", wantedEnvName: "prod-iad", wantedImageTag: "latest", }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { // GIVEN ctrl := gomock.NewController(t) defer ctrl.Finish() mockWs := climocks.NewMockwsAppReader(ctrl) mockStore := climocks.NewMockprojectService(ctrl) mockPrompt := climocks.NewMockprompter(ctrl) tc.mockWs(mockWs) tc.mockStore(mockStore) tc.mockPrompt(mockPrompt) opts := appDeployOpts{ appDeployVars: appDeployVars{ GlobalOpts: &GlobalOpts{ projectName: tc.inProjectName, prompt: mockPrompt, }, AppName: tc.inAppName, EnvName: tc.inEnvName, ImageTag: tc.inImageTag, }, workspaceService: mockWs, projectService: mockStore, } // WHEN err := opts.Ask() // THEN if tc.wantedError == nil { require.Nil(t, err) require.Equal(t, tc.wantedAppName, opts.AppName) require.Equal(t, tc.wantedEnvName, opts.EnvName) require.Equal(t, tc.wantedImageTag, opts.ImageTag) } else { require.EqualError(t, err, tc.wantedError.Error()) } }) } } func TestAppDeployOpts_getAppDockerfilePath(t *testing.T) { var mockWorkspace *climocks.MockwsAppReader mockError := errors.New("mockError") mockManifest := []byte(`name: appA type: 'Load Balanced Web App' image: build: appA/Dockerfile `) tests := map[string]struct { inputApp string setupMocks func(controller *gomock.Controller) wantPath string wantErr error }{ "should return error if workspaceService ReadFile returns error": { inputApp: "appA", setupMocks: func(controller *gomock.Controller) { mockWorkspace = climocks.NewMockwsAppReader(controller) gomock.InOrder( mockWorkspace.EXPECT().ReadAppManifest("appA").Times(1).Return(nil, mockError), ) }, wantPath: "", wantErr: fmt.Errorf("read manifest file %s: %w", "appA", mockError), }, "should trim the manifest DockerfilePath if it contains /Dockerfile": { inputApp: "appA", setupMocks: func(controller *gomock.Controller) { mockWorkspace = climocks.NewMockwsAppReader(controller) gomock.InOrder( mockWorkspace.EXPECT().ReadAppManifest("appA").Times(1).Return(mockManifest, nil), ) }, wantPath: "appA", wantErr: nil, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() test.setupMocks(ctrl) opts := appDeployOpts{ appDeployVars: appDeployVars{ AppName: test.inputApp, }, workspaceService: mockWorkspace, } gotPath, gotErr := opts.getAppDockerfilePath() require.Equal(t, test.wantPath, gotPath) require.Equal(t, test.wantErr, gotErr) }) } } func TestAppDeployOpts_pushAddonsTemplateToS3Bucket(t *testing.T) { mockError := errors.New("some error") buf := &bytes.Buffer{} fmt.Fprint(buf, "some data") tests := map[string]struct { addonsTemplate *bytes.Buffer inputApp string inputProject string inEnvironment *archer.Environment mockProjectSvc func(m *climocks.MockprojectService) mockProjectResourcesGetter func(m *climocks.MockprojectResourcesGetter) mockS3Svc func(m *climocks.MockartifactPutter) wantPath string wantErr error }{ "should push addons template to S3 bucket": { addonsTemplate: buf, inputApp: "mockApp", inputProject: "mockProject", inEnvironment: &archer.Environment{ Name: "mockEnv", Region: "us-west-2", }, mockProjectSvc: func(m *climocks.MockprojectService) { m.EXPECT().GetProject("mockProject").Return(&archer.Project{ Name: "mockProject", }, nil) }, mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) { m.EXPECT().GetProjectResourcesByRegion(&archer.Project{ Name: "mockProject", }, "us-west-2").Return(&archer.ProjectRegionalResources{ S3Bucket: "mockBucket", }, nil) }, 
mockS3Svc: func(m *climocks.MockartifactPutter) { m.EXPECT().PutArtifact("mockBucket", "mockApp.addons.stack.yml", buf).Return("https://mockS3DomainName/mockPath", nil) }, wantErr: nil, wantPath: "https://mockS3DomainName/mockPath", }, "should return error if fail to get project": { addonsTemplate: buf, inputApp: "mockApp", inputProject: "mockProject", inEnvironment: &archer.Environment{ Name: "mockEnv", Region: "us-west-2", }, mockProjectSvc: func(m *climocks.MockprojectService) { m.EXPECT().GetProject("mockProject").Return(nil, mockError) }, mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) {}, mockS3Svc: func(m *climocks.MockartifactPutter) {}, wantErr: fmt.Errorf("get project: some error"), }, "should return error if fail to get project resources": { addonsTemplate: buf, inputApp: "mockApp", inputProject: "mockProject", inEnvironment: &archer.Environment{ Name: "mockEnv", Region: "us-west-2", }, mockProjectSvc: func(m *climocks.MockprojectService) { m.EXPECT().GetProject("mockProject").Return(&archer.Project{ Name: "mockProject", }, nil) }, mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) { m.EXPECT().GetProjectResourcesByRegion(&archer.Project{ Name: "mockProject", }, "us-west-2").Return(nil, mockError) }, mockS3Svc: func(m *climocks.MockartifactPutter) {}, wantErr: fmt.Errorf("get project resources: some error"), }, "should return error if fail to upload to S3 bucket": { addonsTemplate: buf, inputApp: "mockApp", inputProject: "mockProject", inEnvironment: &archer.Environment{ Name: "mockEnv", Region: "us-west-2", }, mockProjectSvc: func(m *climocks.MockprojectService) { m.EXPECT().GetProject("mockProject").Return(&archer.Project{ Name: "mockProject", }, nil) }, mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) { m.EXPECT().GetProjectResourcesByRegion(&archer.Project{ Name: "mockProject", }, "us-west-2").Return(&archer.ProjectRegionalResources{ S3Bucket: "mockBucket", }, nil) }, mockS3Svc: func(m *climocks.MockartifactPutter) { m.EXPECT().PutArtifact("mockBucket", "mockApp.addons.stack.yml", buf).Return("", mockError) }, wantErr: fmt.Errorf("put addons artifact to bucket mockBucket: some error"), }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockProjectSvc := climocks.NewMockprojectService(ctrl) mockProjectResourcesGetter := climocks.NewMockprojectResourcesGetter(ctrl) mockS3Svc := climocks.NewMockartifactPutter(ctrl) tc.mockProjectSvc(mockProjectSvc) tc.mockProjectResourcesGetter(mockProjectResourcesGetter) tc.mockS3Svc(mockS3Svc) opts := appDeployOpts{ appDeployVars: appDeployVars{ AppName: tc.inputApp, enableAddons: true, GlobalOpts: &GlobalOpts{ projectName: tc.inputProject, }, }, projectService: mockProjectSvc, appPackageCfClient: mockProjectResourcesGetter, s3Service: mockS3Svc, targetEnvironment: tc.inEnvironment, } gotPath, gotErr := opts.pushAddonsTemplateToS3Bucket(tc.addonsTemplate) if gotErr != nil { require.EqualError(t, gotErr, tc.wantErr.Error()) } else { require.Equal(t, tc.wantPath, gotPath) } }) } }
1
12,280
This has been waiting for a million years.
aws-copilot-cli
go
@@ -31,6 +31,10 @@ msg.Number = {}; msg.Number.min = "Path `{PATH}` ({VALUE}) is less than minimum allowed value ({MIN})."; msg.Number.max = "Path `{PATH}` ({VALUE}) is more than maximum allowed value ({MAX})."; +msg.Date = {}; +msg.Date.min = "Path `{PATH}` ({VALUE}) is before than minimum allowed value ({MIN})."; +msg.Date.max = "Path `{PATH}` ({VALUE}) is after than maximum allowed value ({MAX})."; + msg.String = {}; msg.String.enum = "`{VALUE}` is not a valid enum value for path `{PATH}`."; msg.String.match = "Path `{PATH}` is invalid ({VALUE}).";
1
/** * The default built-in validator error messages. These may be customized. * * // customize within each schema or globally like so * var mongoose = require('mongoose'); * mongoose.Error.messages.String.enum = "Your custom message for {PATH}."; * * As you might have noticed, error messages support basic templating * * - `{PATH}` is replaced with the invalid document path * - `{VALUE}` is replaced with the invalid value * - `{TYPE}` is replaced with the validator type such as "regexp", "min", or "user defined" * - `{MIN}` is replaced with the declared min value for the Number.min validator * - `{MAX}` is replaced with the declared max value for the Number.max validator * * Click the "show code" link below to see all defaults. * * @property messages * @receiver MongooseError * @api public */ var msg = module.exports = exports = {}; msg.general = {}; msg.general.default = "Validator failed for path `{PATH}` with value `{VALUE}`"; msg.general.required = "Path `{PATH}` is required."; msg.Number = {}; msg.Number.min = "Path `{PATH}` ({VALUE}) is less than minimum allowed value ({MIN})."; msg.Number.max = "Path `{PATH}` ({VALUE}) is more than maximum allowed value ({MAX})."; msg.String = {}; msg.String.enum = "`{VALUE}` is not a valid enum value for path `{PATH}`."; msg.String.match = "Path `{PATH}` is invalid ({VALUE}).";
1
12,633
Minor grammar detail: the 'than' is unnecessary ("is before minimum allowed value" / "is after maximum allowed value").
Automattic-mongoose
js
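Applying that grammar fix, the two new messages would read (a sketch; the final wording is the maintainers' call):

msg.Date = {};
msg.Date.min = "Path `{PATH}` ({VALUE}) is before minimum allowed value ({MIN}).";
msg.Date.max = "Path `{PATH}` ({VALUE}) is after maximum allowed value ({MAX}).";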
@@ -4873,10 +4873,11 @@ class Series(Frame, IndexOpsMixin, Generic[T]): >>> kser.item() 10 """ - item_top_two = self[:2] + scol = self.spark_column + item_top_two = self._internal._sdf.select(scol).head(2) if len(item_top_two) != 1: raise ValueError("can only convert an array of size 1 to a Python scalar") - return item_top_two[0] + return item_top_two[0][0] def _cum(self, func, skipna, part_cols=()): # This is used to cummin, cummax, cumsum, etc.
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark Column to behave similar to pandas Series. """ import re import inspect from collections import Iterable, OrderedDict from functools import partial, wraps, reduce from typing import Any, Generic, List, Optional, Tuple, TypeVar, Union import numpy as np import pandas as pd from pandas.core.accessor import CachedAccessor from pandas.io.formats.printing import pprint_thing from pandas.api.types import is_list_like from databricks.koalas.typedef import infer_return_type, SeriesType, ScalarType from pyspark import sql as spark from pyspark.sql import functions as F, Column from pyspark.sql.functions import pandas_udf, PandasUDFType from pyspark.sql.types import ( BooleanType, DoubleType, FloatType, StringType, StructType, LongType, IntegerType, ) from pyspark.sql.window import Window from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.config import get_option, option_context from databricks.koalas.base import IndexOpsMixin from databricks.koalas.exceptions import SparkPandasIndexingError from databricks.koalas.frame import DataFrame from databricks.koalas.generic import Frame from databricks.koalas.internal import ( InternalFrame, NATURAL_ORDER_COLUMN_NAME, SPARK_DEFAULT_INDEX_NAME, ) from databricks.koalas.missing.series import MissingPandasLikeSeries from databricks.koalas.plot import KoalasSeriesPlotMethods from databricks.koalas.ml import corr from databricks.koalas.utils import ( validate_arguments_and_invoke_function, scol_for, combine_frames, name_like_string, validate_axis, validate_bool_kwarg, verify_temp_column_name, ) from databricks.koalas.datetimes import DatetimeMethods from databricks.koalas.strings import StringMethods # This regular expression pattern is complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ in Series. # This pattern basically seeks the footer string from Pandas' REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)") _flex_doc_SERIES = """ Return {desc} of series and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}`` Parameters ---------- other : Series or scalar value Returns ------- Series The result of the operation. See Also -------- Series.{reverse} {series_examples} """ _add_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.add(df.b) a 4.0 b NaN c 6.0 d NaN Name: a, dtype: float64 >>> df.a.radd(df.b) a 4.0 b NaN c 6.0 d NaN Name: a, dtype: float64 """ _sub_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.subtract(df.b) a 0.0 b NaN c 2.0 d NaN Name: a, dtype: float64 >>> df.a.rsub(df.b) a 0.0 b NaN c -2.0 d NaN Name: a, dtype: float64 """ _mul_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.multiply(df.b) a 4.0 b NaN c 8.0 d NaN Name: a, dtype: float64 >>> df.a.rmul(df.b) a 4.0 b NaN c 8.0 d NaN Name: a, dtype: float64 """ _div_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.divide(df.b) a 1.0 b NaN c 2.0 d NaN Name: a, dtype: float64 >>> df.a.rdiv(df.b) a 1.0 b NaN c 0.5 d NaN Name: a, dtype: float64 """ _pow_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.pow(df.b) a 4.0 b NaN c 16.0 d NaN Name: a, dtype: float64 >>> df.a.rpow(df.b) a 4.0 b NaN c 16.0 d NaN Name: a, dtype: float64 """ _mod_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.mod(df.b) a 0.0 b NaN c 0.0 d NaN Name: a, dtype: float64 >>> df.a.rmod(df.b) a 0.0 b NaN c 2.0 d NaN Name: a, dtype: float64 """ _floordiv_example_SERIES = """ Examples -------- >>> df = ks.DataFrame({'a': [2, 2, 4, np.nan], ... 'b': [2, np.nan, 2, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df a b a 2.0 2.0 b 2.0 NaN c 4.0 2.0 d NaN NaN >>> df.a.floordiv(df.b) a 1.0 b NaN c 2.0 d NaN Name: a, dtype: float64 >>> df.a.rfloordiv(df.b) a 1.0 b NaN c 0.0 d NaN Name: a, dtype: float64 """ T = TypeVar("T") # Needed to disambiguate Series.str and str type str_type = str class Series(Frame, IndexOpsMixin, Generic[T]): """ Koalas Series that corresponds to Pandas Series logically. This holds Spark Column internally. :ivar _internal: an internal immutable Frame to manage metadata. :type _internal: InternalFrame :ivar _kdf: Parent's Koalas DataFrame :type _kdf: ks.DataFrame Parameters ---------- data : array-like, dict, or scalar value, Pandas Series Contains data stored in Series If data is a dict, argument order is maintained for Python 3.6 and later. Note that if `data` is a Pandas Series, other arguments should not be used. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index sequence are used, the index will override the keys found in the dict. 
dtype : numpy.dtype or None If None, dtype will be inferred copy : boolean, default False Copy input data """ def __init__( self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False, anchor=None ): if isinstance(data, InternalFrame): assert dtype is None assert name is None assert not copy assert not fastpath IndexOpsMixin.__init__(self, data, anchor) else: assert anchor is None if isinstance(data, pd.Series): assert index is None assert dtype is None assert name is None assert not copy assert not fastpath s = data else: s = pd.Series( data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath ) kdf = DataFrame(s) IndexOpsMixin.__init__( self, kdf._internal.copy(spark_column=kdf._internal.data_spark_columns[0]), kdf ) def _with_new_scol(self, scol: spark.Column) -> "Series": """ Copy Koalas Series with the new Spark Column. :param scol: the new Spark Column :return: the copied Series """ return Series(self._internal.copy(spark_column=scol), anchor=self._kdf) # type: ignore @property def dtypes(self): """Return the dtype object of the underlying data. >>> s = ks.Series(list('abc')) >>> s.dtype == s.dtypes True """ return self.dtype @property def axes(self): """ Return a list of the row axis labels. Examples -------- >>> kser = ks.Series([1, 2, 3]) >>> kser.axes [Int64Index([0, 1, 2], dtype='int64')] """ return [self.index] @property def spark_type(self): """ Returns the data type as defined by Spark, as a Spark DataType object.""" return self._internal.spark_type_for(self._internal.column_labels[0]) plot = CachedAccessor("plot", KoalasSeriesPlotMethods) # Arithmetic Operators def add(self, other): return (self + other).rename(self.name) add.__doc__ = _flex_doc_SERIES.format( desc="Addition", op_name="+", equiv="series + other", reverse="radd", series_examples=_add_example_SERIES, ) def radd(self, other): return (other + self).rename(self.name) radd.__doc__ = _flex_doc_SERIES.format( desc="Reverse Addition", op_name="+", equiv="other + series", reverse="add", series_examples=_add_example_SERIES, ) def div(self, other): return (self / other).rename(self.name) div.__doc__ = _flex_doc_SERIES.format( desc="Floating division", op_name="/", equiv="series / other", reverse="rdiv", series_examples=_div_example_SERIES, ) divide = div def rdiv(self, other): return (other / self).rename(self.name) rdiv.__doc__ = _flex_doc_SERIES.format( desc="Reverse Floating division", op_name="/", equiv="other / series", reverse="div", series_examples=_div_example_SERIES, ) def truediv(self, other): return (self / other).rename(self.name) truediv.__doc__ = _flex_doc_SERIES.format( desc="Floating division", op_name="/", equiv="series / other", reverse="rtruediv", series_examples=_div_example_SERIES, ) def rtruediv(self, other): return (other / self).rename(self.name) rtruediv.__doc__ = _flex_doc_SERIES.format( desc="Reverse Floating division", op_name="/", equiv="other / series", reverse="truediv", series_examples=_div_example_SERIES, ) def mul(self, other): return (self * other).rename(self.name) mul.__doc__ = _flex_doc_SERIES.format( desc="Multiplication", op_name="*", equiv="series * other", reverse="rmul", series_examples=_mul_example_SERIES, ) multiply = mul def rmul(self, other): return (other * self).rename(self.name) rmul.__doc__ = _flex_doc_SERIES.format( desc="Reverse Multiplication", op_name="*", equiv="other * series", reverse="mul", series_examples=_mul_example_SERIES, ) def sub(self, other): return (self - other).rename(self.name) sub.__doc__ = 
_flex_doc_SERIES.format( desc="Subtraction", op_name="-", equiv="series - other", reverse="rsub", series_examples=_sub_example_SERIES, ) subtract = sub def rsub(self, other): return (other - self).rename(self.name) rsub.__doc__ = _flex_doc_SERIES.format( desc="Reverse Subtraction", op_name="-", equiv="other - series", reverse="sub", series_examples=_sub_example_SERIES, ) def mod(self, other): return (self % other).rename(self.name) mod.__doc__ = _flex_doc_SERIES.format( desc="Modulo", op_name="%", equiv="series % other", reverse="rmod", series_examples=_mod_example_SERIES, ) def rmod(self, other): return (other % self).rename(self.name) rmod.__doc__ = _flex_doc_SERIES.format( desc="Reverse Modulo", op_name="%", equiv="other % series", reverse="mod", series_examples=_mod_example_SERIES, ) def pow(self, other): return (self ** other).rename(self.name) pow.__doc__ = _flex_doc_SERIES.format( desc="Exponential power of series", op_name="**", equiv="series ** other", reverse="rpow", series_examples=_pow_example_SERIES, ) def rpow(self, other): return (other ** self).rename(self.name) rpow.__doc__ = _flex_doc_SERIES.format( desc="Reverse Exponential power", op_name="**", equiv="other ** series", reverse="pow", series_examples=_pow_example_SERIES, ) def floordiv(self, other): return (self // other).rename(self.name) floordiv.__doc__ = _flex_doc_SERIES.format( desc="Integer division", op_name="//", equiv="series // other", reverse="rfloordiv", series_examples=_floordiv_example_SERIES, ) def rfloordiv(self, other): return (other // self).rename(self.name) rfloordiv.__doc__ = _flex_doc_SERIES.format( desc="Reverse Integer division", op_name="//", equiv="other // series", reverse="floordiv", series_examples=_floordiv_example_SERIES, ) # Comparison Operators def eq(self, other): """ Compare if the current value is equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.a == 1 a True b False c False d False Name: a, dtype: bool >>> df.b.eq(1) a True b False c True d False Name: b, dtype: bool """ return (self == other).rename(self.name) equals = eq def gt(self, other): """ Compare if the current value is greater than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.a > 1 a False b True c True d True Name: a, dtype: bool >>> df.b.gt(1) a False b False c False d False Name: b, dtype: bool """ return (self > other).rename(self.name) def ge(self, other): """ Compare if the current value is greater than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.a >= 2 a False b True c True d True Name: a, dtype: bool >>> df.b.ge(2) a False b False c False d False Name: b, dtype: bool """ return (self >= other).rename(self.name) def lt(self, other): """ Compare if the current value is less than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.a < 1 a False b False c False d False Name: a, dtype: bool >>> df.b.lt(2) a True b False c True d False Name: b, dtype: bool """ return (self < other).rename(self.name) def le(self, other): """ Compare if the current value is less than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.a <= 2 a True b True c False d False Name: a, dtype: bool >>> df.b.le(2) a True b False c True d False Name: b, dtype: bool """ return (self <= other).rename(self.name) def ne(self, other): """ Compare if the current value is not equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.a != 1 a False b True c True d True Name: a, dtype: bool >>> df.b.ne(1) a False b True c False d True Name: b, dtype: bool """ return (self != other).rename(self.name) def divmod(self, other): """ Return Integer division and modulo of series and other, element-wise (binary operator `divmod`). Parameters ---------- other : Series or scalar value Returns ------- Series The result of the operation. See Also -------- Series.rdivmod """ return (self.floordiv(other), self.mod(other)) def rdivmod(self, other): """ Return Integer division and modulo of series and other, element-wise (binary operator `rdivmod`). Parameters ---------- other : Series or scalar value Returns ------- Series The result of the operation. See Also -------- Series.divmod """ return (self.rfloordiv(other), self.rmod(other)) def between(self, left, right, inclusive=True): """ Return boolean Series equivalent to left <= series <= right. This function returns a boolean vector containing `True` wherever the corresponding Series element is between the boundary values `left` and `right`. NA values are treated as `False`. Parameters ---------- left : scalar or list-like Left boundary. right : scalar or list-like Right boundary. inclusive : bool, default True Include boundaries. Returns ------- Series Series representing whether each element is between left and right (inclusive). See Also -------- Series.gt : Greater than of series and other. Series.lt : Less than of series and other. Notes ----- This function is equivalent to ``(left <= ser) & (ser <= right)`` Examples -------- >>> s = ks.Series([2, 0, 4, 8, np.nan]) Boundary values are included by default: >>> s.between(1, 4) 0 True 1 False 2 True 3 False 4 False Name: 0, dtype: bool With `inclusive` set to ``False`` boundary values are excluded: >>> s.between(1, 4, inclusive=False) 0 True 1 False 2 False 3 False 4 False Name: 0, dtype: bool `left` and `right` can be any scalar value: >>> s = ks.Series(['Alice', 'Bob', 'Carol', 'Eve']) >>> s.between('Anna', 'Daniel') 0 False 1 True 2 True 3 False Name: 0, dtype: bool """ if inclusive: lmask = self >= left rmask = self <= right else: lmask = self > left rmask = self < right return lmask & rmask # TODO: arg should support Series # TODO: NaN and None def map(self, arg): """ Map values of Series according to input correspondence. Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict``. .. note:: make sure the size of the dictionary is not huge because it could downgrade the performance or throw OutOfMemoryError due to a huge expression within Spark. Consider the input as a functions as an alternative instead in this case. Parameters ---------- arg : function or dict Mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``None``. 
However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``None``. Examples -------- >>> s = ks.Series(['cat', 'dog', None, 'rabbit']) >>> s 0 cat 1 dog 2 None 3 rabbit Name: 0, dtype: object ``map`` accepts a ``dict``. Values that are not found in the ``dict`` are converted to ``None``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 None 3 None Name: 0, dtype: object It also accepts a function: >>> def format(x) -> str: ... return 'I am a {}'.format(x) >>> s.map(format) 0 I am a cat 1 I am a dog 2 I am a None 3 I am a rabbit Name: 0, dtype: object """ if isinstance(arg, dict): is_start = True # In case dictionary is empty. current = F.when(F.lit(False), F.lit(None).cast(self.spark_type)) for to_replace, value in arg.items(): if is_start: current = F.when(self.spark_column == F.lit(to_replace), value) is_start = False else: current = current.when(self.spark_column == F.lit(to_replace), value) if hasattr(arg, "__missing__"): tmp_val = arg[np._NoValue] del arg[np._NoValue] # Remove in case it's set in defaultdict. current = current.otherwise(F.lit(tmp_val)) else: current = current.otherwise(F.lit(None).cast(self.spark_type)) return self._with_new_scol(current).rename(self.name) else: return self.apply(arg) def astype(self, dtype) -> "Series": """ Cast a Koalas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> ser = ks.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 Name: 0, dtype: int32 >>> ser.astype('int64') 0 1 1 2 Name: 0, dtype: int64 """ from databricks.koalas.typedef import as_spark_type spark_type = as_spark_type(dtype) if not spark_type: raise ValueError("Type {} not understood".format(dtype)) if isinstance(spark_type, BooleanType): if isinstance(self.spark_type, StringType): scol = F.when(self.spark_column.isNull(), F.lit(False)).otherwise( F.length(self.spark_column) > 0 ) elif isinstance(self.spark_type, (FloatType, DoubleType)): scol = F.when( self.spark_column.isNull() | F.isnan(self.spark_column), F.lit(True) ).otherwise(self.spark_column.cast(spark_type)) else: scol = F.when(self.spark_column.isNull(), F.lit(False)).otherwise( self.spark_column.cast(spark_type) ) else: scol = self.spark_column.cast(spark_type) return self._with_new_scol(scol) def alias(self, name): """An alias for :meth:`Series.rename`.""" return self.rename(name) @property def shape(self): """Return a tuple of the shape of the underlying data.""" return (len(self),) @property def name(self) -> Union[str, Tuple[str, ...]]: """Return name of the Series.""" name = self._internal.column_labels[0] # type: ignore if name is not None and len(name) == 1: return name[0] else: return name @name.setter def name(self, name: Union[str, Tuple[str, ...]]): self.rename(name, inplace=True) # TODO: Functionality and documentation should be matched. Currently, changing index labels # taking dictionary and function to change index are not supported. def rename(self, index: Union[str, Tuple[str, ...]] = None, **kwargs): """ Alter Series name. Parameters ---------- index : scalar Scalar will alter the ``Series.name`` attribute. inplace : bool, default False Whether to return a new Series. 
If True then value of copy is ignored. Returns ------- Series Series with name altered. Examples -------- >>> s = ks.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 Name: 0, dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 """ if index is None: scol = self.spark_column else: scol = self.spark_column.alias(name_like_string(index)) internal = self._internal.copy( # type: ignore spark_column=scol, column_labels=[index if index is None or isinstance(index, tuple) else (index,)], ) if kwargs.get("inplace", False): self._internal = internal return self else: return Series(internal, anchor=self._kdf) @property def index(self): """The index (axis labels) Column of the Series. See Also -------- Index """ return self._kdf.index @property def is_unique(self): """ Return boolean if values in the object are unique Returns ------- is_unique : boolean >>> ks.Series([1, 2, 3]).is_unique True >>> ks.Series([1, 2, 2]).is_unique False >>> ks.Series([1, 2, 3, None]).is_unique True """ scol = self.spark_column # Here we check: # 1. the distinct count without nulls and count without nulls for non-null values # 2. count null values and see if null is a distinct value. # # This workaround is in order to calculate the distinct count including nulls in # single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls. return self._internal._sdf.select( (F.count(scol) == F.countDistinct(scol)) & (F.count(F.when(scol.isNull(), 1).otherwise(None)) <= 1) ).collect()[0][0] def reset_index(self, level=None, drop=False, name=None, inplace=False): """ Generate a new DataFrame or Series with the index reset. This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses self.name by default. This argument is ignored when drop is True. inplace : bool, default False Modify the Series in place (do not create a new object). Returns ------- Series or DataFrame When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. Examples -------- >>> s = ks.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 To update the Series in place, without generating a new one set `inplace` to True. Note that it also requires ``drop=True``. 
>>> s.reset_index(inplace=True, drop=True) >>> s 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") if inplace and not drop: raise TypeError("Cannot reset_index inplace on a Series to create a DataFrame") if name is not None: kdf = self.rename(name).to_dataframe() else: kdf = self.to_dataframe() kdf = kdf.reset_index(level=level, drop=drop) if drop: kseries = first_series(kdf) if inplace: self._internal = kseries._internal self._kdf = kseries._kdf else: return kseries else: return kdf def to_frame(self, name: Union[str, Tuple[str, ...]] = None) -> spark.DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, default None The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = ks.Series(["a", "b", "c"]) >>> s.to_frame() 0 0 a 1 b 2 c >>> s = ks.Series(["a", "b", "c"], name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ if name is not None: renamed = self.rename(name) else: renamed = self sdf = renamed._internal.to_internal_spark_frame column_labels = None # type: Optional[List[Tuple[str, ...]]] if renamed._internal.column_labels[0] is None: column_labels = [("0",)] column_label_names = None else: column_labels = renamed._internal.column_labels column_label_names = renamed._internal.column_label_names internal = InternalFrame( spark_frame=sdf, index_map=renamed._internal.index_map, column_labels=column_labels, data_spark_columns=[scol_for(sdf, sdf.columns[-1])], column_label_names=column_label_names, ) return DataFrame(internal) to_dataframe = to_frame def to_string( self, buf=None, na_rep="NaN", float_format=None, header=True, index=True, length=False, dtype=False, name=False, max_rows=None, ): """ Render a string representation of the Series. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional buffer to write to na_rep : string, optional string representation of NAN to use, default 'NaN' float_format : one-parameter function, optional formatter function to apply to columns' elements if they are floats default None header : boolean, default True Add the Series header (index name) index : bool, optional Add index (row) labels, default True length : boolean, default False Add the Series length dtype : boolean, default False Add the Series dtype name : boolean, default False Add the Series name if not None max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. Returns ------- formatted : string (if not buffer passed) Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats']) >>> print(df['dogs'].to_string()) 0 0.2 1 0.0 2 0.6 3 0.2 >>> print(df['dogs'].to_string(max_rows=2)) 0 0.2 1 0.0 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kseries = self.head(max_rows) else: kseries = self return validate_arguments_and_invoke_function( kseries._to_internal_pandas(), self.to_string, pd.Series.to_string, args ) def to_clipboard(self, excel=True, sep=None, **kwargs): # Docstring defined below by reusing DataFrame.to_clipboard's. 
args = locals() kseries = self return validate_arguments_and_invoke_function( kseries._to_internal_pandas(), self.to_clipboard, pd.Series.to_clipboard, args ) to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__ def to_dict(self, into=dict): """ Convert Series to {label -> value} dict or dict-like object. .. note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = ks.Series([1, 2, 3, 4]) >>> s_dict = s.to_dict() >>> sorted(s_dict.items()) [(0, 1), (1, 2), (2, 3), (3, 4)] >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) # doctest: +ELLIPSIS defaultdict(<class 'list'>, {...}) """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kseries = self return validate_arguments_and_invoke_function( kseries._to_internal_pandas(), self.to_dict, pd.Series.to_dict, args ) def to_latex( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal=".", multicolumn=None, multicolumn_format=None, multirow=None, ): args = locals() kseries = self return validate_arguments_and_invoke_function( kseries._to_internal_pandas(), self.to_latex, pd.Series.to_latex, args ) to_latex.__doc__ = DataFrame.to_latex.__doc__ def to_pandas(self): """ Return a pandas Series. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats']) >>> df['dogs'].to_pandas() 0 0.2 1 0.0 2 0.6 3 0.2 Name: dogs, dtype: float64 """ return first_series(self._internal.to_pandas_frame.copy()) # Alias to maintain backward compatibility with Spark toPandas = to_pandas def to_list(self): """ Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) .. note:: This method should only be used if the resulting list is expected to be small, as all the data is loaded into the driver's memory. """ return self._to_internal_pandas().to_list() tolist = to_list def drop_duplicates(self, keep="first", inplace=False): """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. Returns ------- Series Series with duplicates dropped. Examples -------- Generate a Series with duplicated entries. >>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... 
name='animal') >>> s.sort_index() 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates().sort_index() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last').sort_index() 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. Setting the value of 'inplace' to ``True`` performs the operation inplace and returns ``None``. >>> s.drop_duplicates(keep=False, inplace=True) >>> s.sort_index() 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") kseries = first_series(self.to_frame().drop_duplicates(keep=keep)) if inplace: self._internal = kseries._internal self._kdf = kseries._kdf else: return kseries def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None): """Fill NA/NaN values. .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- Series Series with NA entries filled. Examples -------- >>> s = ks.Series([np.nan, 2, 3, 4, np.nan, 6], name='x') >>> s 0 NaN 1 2.0 2 3.0 3 4.0 4 NaN 5 6.0 Name: x, dtype: float64 Replace all NaN elements with 0s. >>> s.fillna(0) 0 0.0 1 2.0 2 3.0 3 4.0 4 0.0 5 6.0 Name: x, dtype: float64 We can also propagate non-null values forward or backward. 
>>> s.fillna(method='ffill') 0 NaN 1 2.0 2 3.0 3 4.0 4 4.0 5 6.0 Name: x, dtype: float64 >>> s = ks.Series([np.nan, 'a', 'b', 'c', np.nan], name='x') >>> s.fillna(method='ffill') 0 None 1 a 2 b 3 c 4 c Name: x, dtype: object """ return self._fillna(value, method, axis, inplace, limit) def _fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, part_cols=()): axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, "inplace") if axis != 0: raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if (value is None) and (method is None): raise ValueError("Must specify a fillna 'value' or 'method' parameter.") if (method is not None) and (method not in ["ffill", "pad", "backfill", "bfill"]): raise ValueError("Expecting 'pad', 'ffill', 'backfill' or 'bfill'.") if self.isnull().sum() == 0: if inplace: self._internal = self._internal.copy() self._kdf = self._kdf.copy() else: return self column_name = self.name scol = self.spark_column if value is not None: if not isinstance(value, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(value)) if limit is not None: raise ValueError("limit parameter for value is not support now") scol = F.when(scol.isNull(), value).otherwise(scol) else: if method in ["ffill", "pad"]: func = F.last end = Window.currentRow - 1 if limit is not None: begin = Window.currentRow - limit else: begin = Window.unboundedPreceding elif method in ["bfill", "backfill"]: func = F.first begin = Window.currentRow + 1 if limit is not None: end = Window.currentRow + limit else: end = Window.unboundedFollowing window = ( Window.partitionBy(*part_cols) .orderBy(NATURAL_ORDER_COLUMN_NAME) .rowsBetween(begin, end) ) scol = F.when(scol.isNull(), func(scol, True).over(window)).otherwise(scol) kseries = self._with_new_scol(scol).rename(column_name) if inplace: self._internal = kseries._internal self._kdf = kseries._kdf else: return kseries def dropna(self, axis=0, inplace=False, **kwargs): """ Return a new Series with missing values removed. Parameters ---------- axis : {0 or 'index'}, default 0 There is only one axis to drop values from. inplace : bool, default False If True, do operation inplace and return None. **kwargs Not in use. Returns ------- Series Series with NA entries dropped from it. Examples -------- >>> ser = ks.Series([1., 2., np.nan]) >>> ser 0 1.0 1 2.0 2 NaN Name: 0, dtype: float64 Drop NA values from a Series. >>> ser.dropna() 0 1.0 1 2.0 Name: 0, dtype: float64 Keep the Series with valid entries in the same variable. >>> ser.dropna(inplace=True) >>> ser 0 1.0 1 2.0 Name: 0, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") # TODO: last two examples from Pandas produce different results. kseries = first_series(self.to_dataframe().dropna(axis=axis, inplace=False)) if inplace: self._internal = kseries._internal self._kdf = kseries._kdf else: return kseries def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "Series": """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. 
Returns ------- Series Series with the values outside the clip boundaries replaced Examples -------- >>> ks.Series([0, 2, 4]).clip(1, 3) 0 1 1 2 2 3 Name: 0, dtype: int64 Notes ----- One difference between this implementation and pandas is that running `pd.Series(['a', 'b']).clip(0, 1)` will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while `ks.Series(['a', 'b']).clip(0, 1)` will output the original Series, simply ignoring the incompatible types. """ return first_series(self.to_dataframe().clip(lower, upper)) def drop( self, labels=None, index: Union[str, Tuple[str, ...], List[str], List[Tuple[str, ...]]] = None, level=None, ): """ Return Series with specified index labels removed. Remove elements of a Series based on specifying the index labels. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index labels to drop. index : None Redundant for application on Series, but index can be used instead of labels. level : int or level name, optional For MultiIndex, level for which the labels will be removed. Returns ------- Series Series with specified index labels removed. See Also -------- Series.dropna Examples -------- >>> s = ks.Series(data=np.arange(3), index=['A', 'B', 'C']) >>> s A 0 B 1 C 2 Name: 0, dtype: int64 Drop single label A >>> s.drop('A') B 1 C 2 Name: 0, dtype: int64 Drop labels B and C >>> s.drop(labels=['B', 'C']) A 0 Name: 0, dtype: int64 With 'index' rather than 'labels' returns exactly same result. >>> s.drop(index='A') B 1 C 2 Name: 0, dtype: int64 >>> s.drop(index=['B', 'C']) A 0 Name: 0, dtype: int64 Also support for MultiIndex >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], ... 
index=midx) >>> s lama speed 45.0 weight 200.0 length 1.2 cow speed 30.0 weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 >>> s.drop(labels='weight', level=1) lama speed 45.0 length 1.2 cow speed 30.0 length 1.5 falcon speed 320.0 length 0.3 Name: 0, dtype: float64 >>> s.drop(('lama', 'weight')) lama speed 45.0 length 1.2 cow speed 30.0 weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 >>> s.drop([('lama', 'speed'), ('falcon', 'weight')]) lama weight 200.0 length 1.2 cow speed 30.0 weight 250.0 length 1.5 falcon speed 320.0 length 0.3 Name: 0, dtype: float64 """ level_param = level if labels is not None: if index is not None: raise ValueError("Cannot specify both 'labels' and 'index'") return self.drop(index=labels, level=level) if index is not None: if not isinstance(index, (str, tuple, list)): raise ValueError("'index' type should be one of str, list, tuple") if level is None: level = 0 if level >= len(self._internal.index_spark_columns): raise ValueError("'level' should be less than the number of indexes") if isinstance(index, str): index = [(index,)] # type: ignore elif isinstance(index, tuple): index = [index] else: if not ( all((isinstance(idxes, str) for idxes in index)) or all((isinstance(idxes, tuple) for idxes in index)) ): raise ValueError( "If the given index is a list, it " "should only contains names as strings, " "or a list of tuples that contain " "index names as strings" ) new_index = [] for idxes in index: if isinstance(idxes, tuple): new_index.append(idxes) else: new_index.append((idxes,)) index = new_index drop_index_scols = [] for idxes in index: try: index_scols = [ self._internal.index_spark_columns[lvl] == idx for lvl, idx in enumerate(idxes, level) ] except IndexError: if level_param is None: raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(self._internal.index_spark_columns), len(idxes) ) ) else: return self drop_index_scols.append(reduce(lambda x, y: x & y, index_scols)) cond = ~reduce(lambda x, y: x | y, drop_index_scols) return first_series(DataFrame(self._internal.with_filter(cond))) else: raise ValueError("Need to specify at least one of 'labels' or 'index'") def head(self, n: int = 5) -> "Series": """ Return the first n rows. This function returns the first n rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : Integer, default = 5 Returns ------- The first n rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']}) >>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE 0 alligator 1 bee Name: animal, dtype: object """ return first_series(self.to_dataframe().head(n)) # TODO: Categorical type isn't supported (due to PySpark's limitation) and # some doctests related with timestamps were not added. def unique(self): """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. .. note:: This method returns newly creased Series whereas Pandas returns the unique values as a NumPy array. Returns ------- Returns the unique values as a Series. See Also -------- Index.unique groupby.SeriesGroupBy.unique Examples -------- >>> kser = ks.Series([2, 1, 3, 3], name='A') >>> kser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS <BLANKLINE> ... 1 ... 2 ... 
3 Name: A, dtype: int64 >>> ks.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() 0 2016-01-01 Name: 0, dtype: datetime64[ns] >>> kser.name = ('x', 'a') >>> kser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS <BLANKLINE> ... 1 ... 2 ... 3 Name: (x, a), dtype: int64 """ sdf = self._internal.spark_frame.select(self.spark_column).distinct() internal = InternalFrame( spark_frame=sdf, index_map=None, column_labels=[self._internal.column_labels[0]], data_spark_columns=[scol_for(sdf, self._internal.data_spark_column_names[0])], column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal)) def sort_values( self, ascending: bool = True, inplace: bool = False, na_position: str = "last" ) -> Union["Series", None]: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : Series ordered by values. Examples -------- >>> s = ks.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 Name: 0, dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN Name: 0, dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN Name: 0, dtype: float64 Sort values inplace >>> s.sort_values(ascending=False, inplace=True) >>> s 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN Name: 0, dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 Name: 0, dtype: float64 Sort a series of strings >>> s = ks.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c Name: 0, dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z Name: 0, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") kseries = first_series( self.to_dataframe().sort_values( by=self.name, ascending=ascending, na_position=na_position ) ) if inplace: self._internal = kseries._internal self._kdf = kseries._kdf return None else: return kseries def sort_index( self, axis: int = 0, level: Optional[Union[int, List[int]]] = None, ascending: bool = True, inplace: bool = False, kind: str = None, na_position: str = "last", ) -> Optional["Series"]: """ Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None Koalas does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. 
Returns ------- sorted_obj : Series Examples -------- >>> df = ks.Series([2, 1, np.nan], index=['b', 'a', np.nan]) >>> df.sort_index() a 1.0 b 2.0 NaN NaN Name: 0, dtype: float64 >>> df.sort_index(ascending=False) b 2.0 a 1.0 NaN NaN Name: 0, dtype: float64 >>> df.sort_index(na_position='first') NaN NaN a 1.0 b 2.0 Name: 0, dtype: float64 >>> df.sort_index(inplace=True) >>> df a 1.0 b 2.0 NaN NaN Name: 0, dtype: float64 >>> df = ks.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0') >>> df.sort_index() a 0 3 1 2 b 0 1 1 0 Name: 0, dtype: int64 >>> df.sort_index(level=1) # doctest: +SKIP a 0 3 b 0 1 a 1 2 b 1 0 Name: 0, dtype: int64 >>> df.sort_index(level=[1, 0]) a 0 3 b 0 1 a 1 2 b 1 0 Name: 0, dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") kseries = first_series( self.to_dataframe().sort_index( axis=axis, level=level, ascending=ascending, kind=kind, na_position=na_position ) ) if inplace: self._internal = kseries._internal self._kdf = kseries._kdf return None else: return kseries def add_prefix(self, prefix): """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- Series New Series with updated labels. See Also -------- Series.add_suffix: Suffix column labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = ks.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 Name: 0, dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 Name: 0, dtype: int64 """ assert isinstance(prefix, str) kdf = self.to_dataframe() internal = kdf._internal sdf = internal.spark_frame sdf = sdf.select( [ F.concat(F.lit(prefix), scol_for(sdf, index_column)).alias(index_column) for index_column in internal.index_spark_column_names ] + internal.data_spark_columns ) kdf._internal = internal.with_new_sdf(sdf) return first_series(kdf) def add_suffix(self, suffix): """ Suffix labels with string suffix. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series New Series with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> s = ks.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 Name: 0, dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 Name: 0, dtype: int64 """ assert isinstance(suffix, str) kdf = self.to_dataframe() internal = kdf._internal sdf = internal.spark_frame sdf = sdf.select( [ F.concat(scol_for(sdf, index_column), F.lit(suffix)).alias(index_column) for index_column in internal.index_spark_column_names ] + internal.data_spark_columns ) kdf._internal = internal.with_new_sdf(sdf) return first_series(kdf) def corr(self, other, method="pearson"): """ Compute correlation with `other` Series, excluding missing values. Parameters ---------- other : Series method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- correlation : float Examples -------- >>> df = ks.DataFrame({'s1': [.2, .0, .6, .2], ... 
's2': [.3, .6, .0, .1]}) >>> s1 = df.s1 >>> s2 = df.s2 >>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS -0.851064... >>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS -0.948683... Notes ----- There are behavior differences between Koalas and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ # This implementation is suboptimal because it computes more than necessary, # but it should be a start columns = ["__corr_arg1__", "__corr_arg2__"] kdf = self._kdf.assign(__corr_arg1__=self, __corr_arg2__=other)[columns] kdf.columns = columns c = corr(kdf, method=method) return c.loc[tuple(columns)] def nsmallest(self, n: int = 5) -> "Series": """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have the same performance. Examples -------- >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8] >>> s = ks.Series(data) >>> s 0 1.0 1 2.0 2 3.0 3 4.0 4 NaN 5 6.0 6 7.0 7 8.0 Name: 0, dtype: float64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() 0 1.0 1 2.0 2 3.0 3 4.0 5 6.0 Name: 0, dtype: float64 >>> s.nsmallest(3) 0 1.0 1 2.0 2 3.0 Name: 0, dtype: float64 """ return first_series(self.to_frame().nsmallest(n=n, columns=self.name)) def nlargest(self, n: int = 5) -> "Series": """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have the same performance. Examples -------- >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8] >>> s = ks.Series(data) >>> s 0 1.0 1 2.0 2 3.0 3 4.0 4 NaN 5 6.0 6 7.0 7 8.0 Name: 0, dtype: float64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() 7 8.0 6 7.0 5 6.0 3 4.0 2 3.0 Name: 0, dtype: float64 >>> s.nlargest(n=3) 7 8.0 6 7.0 5 6.0 Name: 0, dtype: float64 """ return first_series(self.to_frame().nlargest(n=n, columns=self.name)) def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- nobs : int Examples -------- Constructing DataFrame from a dictionary: >>> df = ks.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26]}) Notice the uncounted NA values: >>> df['Person'].count() 5 >>> df['Age'].count() 4 """ return self._reduce_for_stat_function(Frame._count_expr, name="count") def append( self, to_append: "Series", ignore_index: bool = False, verify_integrity: bool = False ) -> "Series": """ Concatenate two or more Series.
Parameters ---------- to_append : Series or list/tuple of Series ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise an Exception on creating an index with duplicates Returns ------- appended : Series Examples -------- >>> s1 = ks.Series([1, 2, 3]) >>> s2 = ks.Series([4, 5, 6]) >>> s3 = ks.Series([4, 5, 6], index=[3,4,5]) >>> s1.append(s2) 0 1 1 2 2 3 0 4 1 5 2 6 Name: 0, dtype: int64 >>> s1.append(s3) 0 1 1 2 2 3 3 4 4 5 5 6 Name: 0, dtype: int64 With ignore_index set to True: >>> s1.append(s2, ignore_index=True) 0 1 1 2 2 3 3 4 4 5 5 6 Name: 0, dtype: int64 """ return first_series( self.to_dataframe().append(to_append.to_dataframe(), ignore_index, verify_integrity) ) def sample( self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None, ) -> "Series": return first_series( self.to_dataframe().sample(n=n, frac=frac, replace=replace, random_state=random_state) ) sample.__doc__ = DataFrame.sample.__doc__ def hist(self, bins=10, **kwds): return self.plot.hist(bins, **kwds) hist.__doc__ = KoalasSeriesPlotMethods.hist.__doc__ def apply(self, func, args=(), **kwds): """ Invoke function on values of Series. Can be a Python function that only works on the Series. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. Parameters ---------- func : function Python function to apply. Specifying a type hint for the return type is encouraged but optional; if it is omitted, Koalas infers the return type by executing the function once, which can be expensive. args : tuple Positional arguments passed to func after the series value. **kwds Additional keyword arguments passed to func. Returns ------- Series See Also -------- Series.aggregate : Only perform aggregating type operations. Series.transform : Only perform transforming type operations. DataFrame.apply : The equivalent function for DataFrame. Examples -------- Create a Series with typical summer temperatures for each city. >>> s = ks.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 Name: 0, dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x) -> np.int64: ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 Name: 0, dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword >>> def subtract_custom_value(x, custom_value) -> np.int64: ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 Name: 0, dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply`` >>> def add_custom_values(x, **kwargs) -> np.int64: ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 Name: 0, dtype: int64 Use a function from the Numpy library >>> def numpy_log(col) -> np.float64: ... return np.log(col) >>> s.apply(numpy_log) London 2.995732 New York 3.044522 Helsinki 2.484907 Name: 0, dtype: float64 You can omit the type hint and let Koalas infer its type.
>>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 Name: 0, dtype: float64 """ assert callable(func), "the first argument should be a callable function." try: spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None except TypeError: # Falls back to schema inference if it fails to get signature. should_infer_schema = True apply_each = wraps(func)(lambda s: s.apply(func, args=args, **kwds)) if should_infer_schema: # TODO: In this case, it avoids the shortcut for now (but only infers schema) # because it returns a series from a different DataFrame and it has a different # anchor. We should fix this to allow the shortcut or only allow to infer # schema. limit = get_option("compute.shortcut_limit") pser = self.head(limit)._to_internal_pandas() transformed = pser.apply(func, *args, **kwds) kser = Series(transformed) return self._transform_batch(apply_each, kser.spark_type) else: sig_return = infer_return_type(func) if not isinstance(sig_return, ScalarType): raise ValueError( "Expected the return type of this function to be of scalar type, " "but found type {}".format(sig_return) ) return_schema = sig_return.tpe return self._transform_batch(apply_each, return_schema) # TODO: not all arguments are implemented compared to pandas' for now. def aggregate(self, func: Union[str, List[str]]): """Aggregate using one or more operations over the specified axis. Parameters ---------- func : str or a list of str Function name(s), as string(s), to apply to the series. Returns ------- scalar, Series The return can be: - scalar : when Series.agg is called with single function - Series : when Series.agg is called with several functions Notes ----- `agg` is an alias for `aggregate`. Use the alias. See Also -------- Series.apply : Invoke function on a Series. Series.transform : Only perform transforming type operations. Series.groupby : Perform operations over groups. DataFrame.aggregate : The equivalent function for DataFrame. Examples -------- >>> s = ks.Series([1, 2, 3, 4]) >>> s.agg('min') 1 >>> s.agg(['min', 'max']) max 4 min 1 Name: 0, dtype: int64 """ if isinstance(func, list): return self.to_frame().agg(func)[self.name] elif isinstance(func, str): return getattr(self, func)() else: raise ValueError("func must be a string or list of strings") agg = aggregate def transpose(self, *args, **kwargs): """ Return the transpose, which is by definition self. Examples -------- It returns the same object as the transpose of the given series object, which is by definition self. >>> s = ks.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 Name: 0, dtype: int64 >>> s.transpose() 0 1 1 2 2 3 Name: 0, dtype: int64 """ return Series(self._internal.copy(), anchor=self._kdf) T = property(transpose) def transform(self, func, axis=0, *args, **kwargs): """ Call ``func`` producing the same type as `self` with transformed values and that has the same axis length as input. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. Parameters ---------- func : function or list A function or a list of functions to use for transforming the data. axis : int, default 0 or 'index' Can only be set to 0 at the moment. *args Positional arguments to pass to `func`.
**kwargs Keyword arguments to pass to `func`. Returns ------- An instance of the same type as `self` that must have the same length as the input. See Also -------- Series.aggregate : Only perform aggregating type operations. Series.apply : Invoke function on Series. DataFrame.transform : The equivalent function for DataFrame. Examples -------- >>> s = ks.Series(range(3)) >>> s 0 0 1 1 2 2 Name: 0, dtype: int64 >>> def sqrt(x) -> float: ... return np.sqrt(x) >>> s.transform(sqrt) 0 0.000000 1 1.000000 2 1.414214 Name: 0, dtype: float32 Even though the resulting instance must have the same length as the input, it is possible to provide several input functions: >>> def exp(x) -> float: ... return np.exp(x) >>> s.transform([sqrt, exp]) sqrt exp 0 0.000000 1.000000 1 1.000000 2.718282 2 1.414214 7.389056 You can omit the type hint and let Koalas infer its type. >>> s.transform([np.sqrt, np.exp]) sqrt exp 0 0.000000 1.000000 1 1.000000 2.718282 2 1.414214 7.389056 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if isinstance(func, list): applied = [] for f in func: applied.append(self.apply(f, args=args, **kwargs).rename(f.__name__)) internal = self._internal.with_new_columns(applied) return DataFrame(internal) else: return self.apply(func, args=args, **kwargs) def transform_batch(self, func, *args, **kwargs) -> "ks.Series": """ Transform the data with the function that takes pandas Series and outputs pandas Series. The pandas Series given to the function is of a batch used internally. See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: the `func` is unable to access the whole input series. Koalas internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of the whole frame but of the batch internally ... # used. ... def length(pser) -> ks.Series[int]: ... return pd.Series([len(pser)] * len(pser)) ... >>> df = ks.DataFrame({'A': range(1000)}) >>> df.A.transform_batch(length) # doctest: +SKIP c0 0 83 1 83 2 83 ... .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def plus_one(x) -> ks.Series[int]: ... return x + 1 Parameters ---------- func : function Function to apply to each internal pandas Series. *args Positional arguments to pass to func. **kwargs Keyword arguments to pass to func. Returns ------- Series See Also -------- DataFrame.apply_batch : Similar but it takes pandas DataFrame as its internal batch. Examples -------- >>> df = ks.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B']) >>> df A B 0 1 2 1 3 4 2 5 6 >>> def plus_one_func(pser) -> ks.Series[np.int64]: ... return pser + 1 >>> df.A.transform_batch(plus_one_func) 0 2 1 4 2 6 Name: A, dtype: int64 You can also omit the type hints so Koalas infers the return schema as below: >>> df.A.transform_batch(lambda pser: pser + 1) 0 2 1 4 2 6 Name: A, dtype: int64 You can also specify extra arguments. >>> def plus_one_func(pser, a, b, c=3) -> ks.Series[np.int64]: ... return pser + a + b + c >>> df.A.transform_batch(plus_one_func, 1, b=2) 0 7 1 9 2 11 Name: A, dtype: int64 You can also use ``np.ufunc`` as input.
>>> df.A.transform_batch(np.add, 10) 0 11 1 13 2 15 Name: A, dtype: int64 """ assert callable(func), "the first argument should be a callable function." return_sig = None try: spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) except TypeError: # Falls back to schema inference if it fails to get signature. pass return_schema = None if return_sig is not None: # Extract the signature arguments from this function. sig_return = infer_return_type(func) if not isinstance(sig_return, SeriesType): raise ValueError( "Expected the return type of this function to be of type column," " but found type {}".format(sig_return) ) return_schema = sig_return.tpe ff = func func = lambda o: ff(o, *args, **kwargs) return self._transform_batch(func, return_schema) def _transform_batch(self, func, return_schema): if isinstance(func, np.ufunc): f = func func = lambda *args, **kwargs: f(*args, **kwargs) if return_schema is None: # TODO: In this case, it avoids the shortcut for now (but only infers schema) # because it returns a series from a different DataFrame and it has a different # anchor. We should fix this to allow the shortcut or only allow to infer # schema. limit = get_option("compute.shortcut_limit") pser = self.head(limit)._to_internal_pandas() transformed = pser.transform(func) kser = Series(transformed) spark_return_type = kser.spark_type else: spark_return_type = return_schema pudf = pandas_udf(func, returnType=spark_return_type, functionType=PandasUDFType.SCALAR) return self._with_new_scol(scol=pudf(self.spark_column)).rename(self.name) def round(self, decimals=0): """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Series object See Also -------- DataFrame.round Examples -------- >>> df = ks.Series([0.028208, 0.038683, 0.877076], name='x') >>> df 0 0.028208 1 0.038683 2 0.877076 Name: x, dtype: float64 >>> df.round(2) 0 0.03 1 0.04 2 0.88 Name: x, dtype: float64 """ if not isinstance(decimals, int): raise ValueError("decimals must be an integer") column_name = self.name scol = F.round(self.spark_column, decimals) return self._with_new_scol(scol).rename(column_name) # TODO: add 'interpolation' parameter. def quantile(self, q=0.5, accuracy=10000): """ Return value at the given quantile. .. note:: Unlike pandas', the quantile in Koalas is an approximated quantile based upon approximate percentile computation because computing quantile across a large dataset is extremely expensive. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- float or Series If the current object is a Series and ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. 
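Notes
-----
The value is computed with Spark's ``approx_percentile`` expression, so a larger
``accuracy`` trades memory and computation time for precision; the relative error
is bounded by ``1.0 / accuracy``. A hedged sketch (the accuracy value below is
only illustrative, and the approximate result may differ slightly):

>>> ks.Series(range(1000)).quantile(.5, accuracy=100000)  # doctest: +SKIP
499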
Examples -------- >>> s = ks.Series([1, 2, 3, 4, 5]) >>> s.quantile(.5) 3 >>> s.quantile([.25, .5, .75]) 0.25 2 0.5 3 0.75 4 Name: 0, dtype: int64 """ if not isinstance(accuracy, int): raise ValueError("accuracy must be an integer; however, got [%s]" % type(accuracy)) if isinstance(q, Iterable): q = list(q) for v in q if isinstance(q, list) else [q]: if not isinstance(v, float): raise ValueError( "q must be a float or an array of floats; however, [%s] found." % type(v) ) if v < 0.0 or v > 1.0: raise ValueError("percentiles should all be in the interval [0, 1].") if isinstance(q, list): quantiles = q # TODO: avoid to use dataframe. After this, anchor will be lost. # First calculate the percentiles and map it to each `quantiles` # by creating each entry as a struct. So, it becomes an array of # structs as below: # # +--------------------------------+ # | arrays | # +--------------------------------+ # |[[0.25, 2], [0.5, 3], [0.75, 4]]| # +--------------------------------+ sdf = self._internal._sdf args = ", ".join(map(str, quantiles)) percentile_col = F.expr( "approx_percentile(`%s`, array(%s), %s)" % (self.name, args, accuracy) ) sdf = sdf.select(percentile_col.alias("percentiles")) internal_index_column = SPARK_DEFAULT_INDEX_NAME value_column = "value" cols = [] for i, quantile in enumerate(quantiles): cols.append( F.struct( F.lit("%s" % quantile).alias(internal_index_column), F.expr("percentiles[%s]" % i).alias(value_column), ) ) sdf = sdf.select(F.array(*cols).alias("arrays")) # And then, explode it and manually set the index. # # +-----------------+-----+ # |__index_level_0__|value| # +-----------------+-----+ # | 0.25 | 2| # | 0.5 | 3| # | 0.75 | 4| # +-----------------+-----+ sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_map=OrderedDict({internal_index_column: None}), column_labels=None, data_spark_columns=[scol_for(sdf, value_column)], column_label_names=None, ) return DataFrame(internal)[value_column].rename(self.name) else: return self._reduce_for_stat_function( lambda _: F.expr("approx_percentile(`%s`, %s, %s)" % (self.name, q, accuracy)), name="median", ) # TODO: add axis, numeric_only, pct, na_option parameter def rank(self, method="average", ascending=True): """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values. .. note:: the current implementation of rank uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) Returns ------- ranks : same type as caller Examples -------- >>> s = ks.Series([1, 2, 2, 3], name='A') >>> s 0 1 1 2 2 2 3 3 Name: A, dtype: int64 >>> s.rank() 0 1.0 1 2.5 2 2.5 3 4.0 Name: A, dtype: float64 If method is set to 'min', it uses the lowest rank in the group. >>> s.rank(method='min') 0 1.0 1 2.0 2 2.0 3 4.0 Name: A, dtype: float64 If method is set to 'max', it uses the highest rank in the group.
>>> s.rank(method='max') 0 1.0 1 3.0 2 3.0 3 4.0 Name: A, dtype: float64 If method is set to 'first', ranks are assigned in the order they appear, without grouping. >>> s.rank(method='first') 0 1.0 1 2.0 2 3.0 3 4.0 Name: A, dtype: float64 If method is set to 'dense', it leaves no gaps in the ranking between groups. >>> s.rank(method='dense') 0 1.0 1 2.0 2 2.0 3 3.0 Name: A, dtype: float64 """ return self._rank(method, ascending) def _rank(self, method="average", ascending=True, part_cols=()): if method not in ["average", "min", "max", "first", "dense"]: msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'" raise ValueError(msg) if len(self._internal.index_spark_column_names) > 1: raise ValueError("rank does not support MultiIndex now") if ascending: asc_func = lambda scol: scol.asc() else: asc_func = lambda scol: scol.desc() if method == "first": window = ( Window.orderBy( asc_func(self._internal.spark_column), asc_func(F.col(NATURAL_ORDER_COLUMN_NAME)), ) .partitionBy(*part_cols) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) scol = F.row_number().over(window) elif method == "dense": window = ( Window.orderBy(asc_func(self._internal.spark_column)) .partitionBy(*part_cols) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) scol = F.dense_rank().over(window) else: if method == "average": stat_func = F.mean elif method == "min": stat_func = F.min elif method == "max": stat_func = F.max window1 = ( Window.orderBy(asc_func(self._internal.spark_column)) .partitionBy(*part_cols) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) window2 = Window.partitionBy( [self._internal.spark_column] + list(part_cols) ).rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing) scol = stat_func(F.row_number().over(window1)).over(window2) kser = self._with_new_scol(scol).rename(self.name) return kser.astype(np.float64) def describe(self, percentiles: Optional[List[float]] = None) -> "Series": return first_series(self.to_dataframe().describe(percentiles)) describe.__doc__ = DataFrame.describe.__doc__ def diff(self, periods=1): """ First discrete difference of element. Calculates the difference of a Series element compared with another element in the Series (default is the element in the previous row). .. note:: the current implementation of diff uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. Returns ------- diffed : Series Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ...
'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.b.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 Name: b, dtype: float64 Difference with 3rd previous value >>> df.c.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 15.0 4 21.0 5 27.0 Name: c, dtype: float64 Difference with following value >>> df.c.diff(periods=-1) 0 -3.0 1 -5.0 2 -7.0 3 -9.0 4 -11.0 5 NaN Name: c, dtype: float64 """ return self._diff(periods) def _diff(self, periods, part_cols=()): if not isinstance(periods, int): raise ValueError("periods should be an int; however, got [%s]" % type(periods)) window = ( Window.partitionBy(*part_cols) .orderBy(NATURAL_ORDER_COLUMN_NAME) .rowsBetween(-periods, -periods) ) scol = self.spark_column - F.lag(self.spark_column, periods).over(window) return self._with_new_scol(scol).rename(self.name) def idxmax(self, skipna=True): """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- Series.idxmin : Return index *label* of the first occurrence of minimum of values. Examples -------- >>> s = ks.Series(data=[1, None, 4, 3, 5], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 5.0 Name: 0, dtype: float64 >>> s.idxmax() 'E' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan In case of multi-index, you get a tuple: >>> index = pd.MultiIndex.from_arrays([ ... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second')) >>> s = ks.Series(data=[1, None, 4, 5], index=index) >>> s first second a c 1.0 d NaN b e 4.0 f 5.0 Name: 0, dtype: float64 >>> s.idxmax() ('b', 'f') If multiple values equal the maximum, the first row label with that value is returned. >>> s = ks.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8]) >>> s 10 1 3 100 5 1 2 100 1 1 8 100 Name: 0, dtype: int64 >>> s.idxmax() 3 """ sdf = self._internal.spark_frame scol = self.spark_column index_scols = self._internal.index_spark_columns # desc_nulls_(last|first) is used via Py4J directly because # it's not supported in Spark 2.3. if skipna: sdf = sdf.orderBy(Column(scol._jc.desc_nulls_last()), NATURAL_ORDER_COLUMN_NAME) else: sdf = sdf.orderBy(Column(scol._jc.desc_nulls_first()), NATURAL_ORDER_COLUMN_NAME) results = sdf.select([scol] + index_scols).take(1) if len(results) == 0: raise ValueError("attempt to get idxmax of an empty sequence") if results[0][0] is None: # This will only happen when skipna is False because we will # place nulls first. return np.nan values = list(results[0][1:]) if len(values) == 1: return values[0] else: return tuple(values) def idxmin(self, skipna=True): """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``.
This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = ks.Series(data=[1, None, 4, 0], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 0.0 Name: 0, dtype: float64 >>> s.idxmin() 'D' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan In case of multi-index, you get a tuple: >>> index = pd.MultiIndex.from_arrays([ ... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second')) >>> s = ks.Series(data=[1, None, 4, 0], index=index) >>> s first second a c 1.0 d NaN b e 4.0 f 0.0 Name: 0, dtype: float64 >>> s.idxmin() ('b', 'f') If multiple values equal the minimum, the first row label with that value is returned. >>> s = ks.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8]) >>> s 10 1 3 100 5 1 2 100 1 1 8 100 Name: 0, dtype: int64 >>> s.idxmin() 10 """ sdf = self._internal._sdf scol = self.spark_column index_scols = self._internal.index_spark_columns # asc_nulls_(last|first) is used via Py4J directly because # it's not supported in Spark 2.3. if skipna: sdf = sdf.orderBy(Column(scol._jc.asc_nulls_last()), NATURAL_ORDER_COLUMN_NAME) else: sdf = sdf.orderBy(Column(scol._jc.asc_nulls_first()), NATURAL_ORDER_COLUMN_NAME) results = sdf.select([scol] + index_scols).take(1) if len(results) == 0: raise ValueError("attempt to get idxmin of an empty sequence") if results[0][0] is None: # This will only happen when skipna is False because we will # place nulls first. return np.nan values = list(results[0][1:]) if len(values) == 1: return values[0] else: return tuple(values) def pop(self, item): """ Return item and drop it from the Series. Parameters ---------- item : str Label of index to be popped. Returns ------- Series Examples -------- >>> s = ks.Series(data=np.arange(3), index=['A', 'B', 'C']) >>> s A 0 B 1 C 2 Name: 0, dtype: int64 >>> s.pop('A') 0 >>> s B 1 C 2 Name: 0, dtype: int64 >>> s = ks.Series(data=np.arange(3), index=['A', 'A', 'C']) >>> s A 0 A 1 C 2 Name: 0, dtype: int64 >>> s.pop('A') A 0 A 1 Name: 0, dtype: int64 >>> s C 2 Name: 0, dtype: int64 Also supports MultiIndex >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], ... index=midx) >>> s lama speed 45.0 weight 200.0 length 1.2 cow speed 30.0 weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 >>> s.pop('lama') speed 45.0 weight 200.0 length 1.2 Name: 0, dtype: float64 >>> s cow speed 30.0 weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 Also supports MultiIndex with several index levels. >>> midx = pd.MultiIndex([['a', 'b', 'c'], ... ['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 0, 0, 0, 1, 1, 1], ... [0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 0, 2]] ... ) >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], ...
index=midx) >>> s a lama speed 45.0 weight 200.0 length 1.2 cow speed 30.0 weight 250.0 length 1.5 b falcon speed 320.0 speed 1.0 length 0.3 Name: 0, dtype: float64 >>> s.pop(('a', 'lama')) speed 45.0 weight 200.0 length 1.2 Name: 0, dtype: float64 >>> s a cow speed 30.0 weight 250.0 length 1.5 b falcon speed 320.0 speed 1.0 length 0.3 Name: 0, dtype: float64 >>> s.pop(('b', 'falcon', 'speed')) (b, falcon, speed) 320.0 (b, falcon, speed) 1.0 Name: 0, dtype: float64 """ if not isinstance(item, (str, tuple)): raise ValueError("'key' should be a string or a tuple that contains strings") if isinstance(item, str): item = (item,) if not all(isinstance(index, str) for index in item): raise ValueError( "'key' should have index names as only strings " "or a tuple that contains index names as only strings" ) if len(self._internal._index_map) < len(item): raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(item), len(self._internal.index_map) ) ) cols = self._internal.index_spark_columns[len(item) :] + [ self._internal.spark_column_for(self._internal.column_labels[0]) ] rows = [self._internal.spark_columns[level] == index for level, index in enumerate(item)] sdf = self._internal.spark_frame.select(cols).filter(reduce(lambda x, y: x & y, rows)) if len(self._internal._index_map) == len(item): # if the spark_frame has one column and one row of data, # return the data only, without a frame pdf = sdf.limit(2).toPandas() length = len(pdf) if length == 1: self._internal = self.drop(item)._internal return pdf[self.name].iloc[0] self._internal = self.drop(item)._internal item_string = name_like_string(item) sdf = sdf.withColumn(SPARK_DEFAULT_INDEX_NAME, F.lit(str(item_string))) internal = InternalFrame( spark_frame=sdf, index_map=OrderedDict({SPARK_DEFAULT_INDEX_NAME: None}) ) return first_series(DataFrame(internal)) internal = self._internal.copy( spark_frame=sdf, index_map=OrderedDict(list(self._internal._index_map.items())[len(item) :]), ) self._internal = self.drop(item)._internal return first_series(DataFrame(internal)) def copy(self, deep=None) -> "Series": """ Make a copy of this object's indices and data. Parameters ---------- deep : None This parameter is not supported; it is just a dummy parameter to match pandas. Returns ------- copy : Series Examples -------- >>> s = ks.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 Name: 0, dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 Name: 0, dtype: int64 """ return first_series(DataFrame(self._internal.copy())) def mode(self, dropna=True) -> "Series": """ Return the mode(s) of the dataset. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series. Examples -------- >>> s = ks.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan]) >>> s 0 0.0 1 0.0 2 1.0 3 1.0 4 1.0 5 NaN 6 NaN 7 NaN Name: 0, dtype: float64 >>> s.mode() 0 1.0 Name: 0, dtype: float64 If there are several modes with the same count, all of them are shown >>> s = ks.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, ... np.nan, np.nan, np.nan]) >>> s 0 0.0 1 0.0 2 1.0 3 1.0 4 1.0 5 2.0 6 2.0 7 2.0 8 3.0 9 3.0 10 3.0 11 NaN 12 NaN 13 NaN Name: 0, dtype: float64 >>> s.mode().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS <BLANKLINE> ... 1.0 ... 2.0 ... 3.0 Name: 0, dtype: float64 With 'dropna' set to 'False', we can also see NaN in the result >>> s.mode(False).sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS <BLANKLINE> ... 1.0 ... 2.0 ... 3.0 ...
NaN Name: 0, dtype: float64 """ ser_count = self.value_counts(dropna=dropna, sort=False) sdf_count = ser_count._internal.spark_frame most_value = ser_count.max() sdf_most_value = sdf_count.filter("count == {}".format(most_value)) sdf = sdf_most_value.select(F.col(SPARK_DEFAULT_INDEX_NAME).alias("0")) internal = InternalFrame(spark_frame=sdf, index_map=None) result = first_series(DataFrame(internal)) result.name = self.name return result def keys(self): """ Return alias for index. Returns ------- Index Index of the Series. Examples -------- >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) >>> kser.keys() # doctest: +SKIP MultiIndex([( 'lama', 'speed'), ( 'lama', 'weight'), ( 'lama', 'length'), ( 'cow', 'speed'), ( 'cow', 'weight'), ( 'cow', 'length'), ('falcon', 'speed'), ('falcon', 'weight'), ('falcon', 'length')], ) """ return self.index # TODO: 'regex', 'method' parameter def replace(self, to_replace=None, value=None, regex=False) -> "Series": """ Replace values given in to_replace with value. Values of the Series are replaced with other values dynamically. Parameters ---------- to_replace : str, list, dict, Series, int, float, or None How to find the values that will be replaced. * numeric, str: - numeric: numeric values equal to to_replace will be replaced with value - str: string exactly matching to_replace will be replaced with value * list of str or numeric: - if to_replace and value are both lists, they must be the same length. - str and numeric rules apply as above. * dict: - Dicts can be used to specify different replacement values for different existing values. For example, {'a': 'b', 'y': 'z'} replaces the value ‘a’ with ‘b’ and ‘y’ with ‘z’. To use a dict in this way the value parameter should be None. - For a DataFrame a dict can specify that different values should be replaced in different columns. For example, {'a': 1, 'b': 'z'} looks for the value 1 in column ‘a’ and the value ‘z’ in column ‘b’ and replaces these values with whatever is specified in value. The value parameter should not be None in this case. You can treat this as a special case of passing two lists except that you are specifying the column to search in. See the examples section for examples of each of these. value : scalar, dict, list, str default None Value to replace any values matching to_replace with. For a DataFrame a dict of values can be used to specify which value to use for each column (columns not in the dict will not be filled). Regular expressions, strings and lists or dicts of such objects are also allowed. Returns ------- Series Object after replacement. Examples -------- Scalar `to_replace` and `value` >>> s = ks.Series([0, 1, 2, 3, 4]) >>> s 0 0 1 1 2 2 3 3 4 4 Name: 0, dtype: int64 >>> s.replace(0, 5) 0 5 1 1 2 2 3 3 4 4 Name: 0, dtype: int64 List-like `to_replace` >>> s.replace([0, 4], 5000) 0 5000 1 1 2 2 3 3 4 5000 Name: 0, dtype: int64 >>> s.replace([1, 2, 3], [10, 20, 30]) 0 0 1 10 2 20 3 30 4 4 Name: 0, dtype: int64 Dict-like `to_replace` >>> s.replace({1: 1000, 2: 2000, 3: 3000, 4: 4000}) 0 0 1 1000 2 2000 3 3000 4 4000 Name: 0, dtype: int64 Also support for MultiIndex >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], ... 
index=midx) >>> s lama speed 45.0 weight 200.0 length 1.2 cow speed 30.0 weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 >>> s.replace(45, 450) lama speed 450.0 weight 200.0 length 1.2 cow speed 30.0 weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 >>> s.replace([45, 30, 320], 500) lama speed 500.0 weight 200.0 length 1.2 cow speed 500.0 weight 250.0 length 1.5 falcon speed 500.0 weight 1.0 length 0.3 Name: 0, dtype: float64 >>> s.replace({45: 450, 30: 300}) lama speed 450.0 weight 200.0 length 1.2 cow speed 300.0 weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 """ if to_replace is None: return self if not isinstance(to_replace, (str, list, dict, int, float)): raise ValueError("'to_replace' should be one of str, list, dict, int, float") if regex: raise NotImplementedError("replace currently does not support regex") if isinstance(to_replace, list) and isinstance(value, list): if not len(to_replace) == len(value): raise ValueError( "Replacement lists must match in length. Expecting {}, got {}".format( len(to_replace), len(value) ) ) to_replace = {k: v for k, v in zip(to_replace, value)} if isinstance(to_replace, dict): is_start = True if len(to_replace) == 0: current = self.spark_column else: for to_replace_, value in to_replace.items(): if is_start: current = F.when(self.spark_column == F.lit(to_replace_), value) is_start = False else: current = current.when(self.spark_column == F.lit(to_replace_), value) current = current.otherwise(self.spark_column) else: current = F.when(self.spark_column.isin(to_replace), value).otherwise(self.spark_column) return self._with_new_scol(current) def update(self, other): """ Modify Series in place using non-NA values from passed Series. Aligns on index. Parameters ---------- other : Series Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> s = ks.Series([1, 2, 3]) >>> s.update(ks.Series([4, 5, 6])) >>> s.sort_index() 0 4 1 5 2 6 Name: 0, dtype: int64 >>> s = ks.Series(['a', 'b', 'c']) >>> s.update(ks.Series(['d', 'e'], index=[0, 2])) >>> s.sort_index() 0 d 1 b 2 e Name: 0, dtype: object >>> s = ks.Series([1, 2, 3]) >>> s.update(ks.Series([4, 5, 6, 7, 8])) >>> s.sort_index() 0 4 1 5 2 6 Name: 0, dtype: int64 >>> s = ks.Series([1, 2, 3], index=[10, 11, 12]) >>> s 10 1 11 2 12 3 Name: 0, dtype: int64 >>> s.update(ks.Series([4, 5, 6])) >>> s.sort_index() 10 1 11 2 12 3 Name: 0, dtype: int64 >>> s.update(ks.Series([4, 5, 6], index=[11, 12, 13])) >>> s.sort_index() 10 1 11 4 12 5 Name: 0, dtype: int64 If ``other`` contains NaNs, the corresponding values are not updated in the original Series.
>>> s = ks.Series([1, 2, 3]) >>> s.update(ks.Series([4, np.nan, 6])) >>> s.sort_index() 0 4.0 1 2.0 2 6.0 Name: 0, dtype: float64 >>> reset_option("compute.ops_on_diff_frames") """ if not isinstance(other, Series): raise ValueError("'other' must be a Series") index_scol_names = [index_map[0] for index_map in self._internal.index_map.items()] combined = combine_frames(self.to_frame(), other.to_frame(), how="leftouter") combined_sdf = combined._sdf this_col = "__this_%s" % str( self._internal.spark_column_name_for(self._internal.column_labels[0]) ) that_col = "__that_%s" % str( self._internal.spark_column_name_for(other._internal.column_labels[0]) ) cond = ( F.when(scol_for(combined_sdf, that_col).isNotNull(), scol_for(combined_sdf, that_col)) .otherwise(combined_sdf[this_col]) .alias(str(self._internal.spark_column_name_for(self._internal.column_labels[0]))) ) internal = InternalFrame( spark_frame=combined_sdf.select(index_scol_names + [cond]), index_map=self._internal.index_map, column_labels=self._internal.column_labels, ) self_updated = first_series(ks.DataFrame(internal)) self._internal = self_updated._internal self._kdf = self_updated._kdf def where(self, cond, other=np.nan): """ Replace values where the condition is False. Parameters ---------- cond : boolean Series Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, Series Entries where cond is False are replaced with corresponding value from other. Returns ------- Series Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> s1 = ks.Series([0, 1, 2, 3, 4]) >>> s2 = ks.Series([100, 200, 300, 400, 500]) >>> s1.where(s1 > 0).sort_index() 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 Name: 0, dtype: float64 >>> s1.where(s1 > 1, 10).sort_index() 0 10 1 10 2 2 3 3 4 4 Name: 0, dtype: int64 >>> s1.where(s1 > 1, s1 + 100).sort_index() 0 100 1 101 2 2 3 3 4 4 Name: 0, dtype: int64 >>> s1.where(s1 > 1, s2).sort_index() 0 100 1 200 2 2 3 3 4 4 Name: 0, dtype: int64 >>> reset_option("compute.ops_on_diff_frames") """ assert isinstance(cond, Series) # We should check the DataFrame from both `cond` and `other`. should_try_ops_on_diff_frame = cond._kdf is not self._kdf or ( isinstance(other, Series) and other._kdf is not self._kdf ) if should_try_ops_on_diff_frame: # Try to perform it with 'compute.ops_on_diff_frame' option. 
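# A sketch of the flow below: `cond` (and `other`, when it is a Series
# anchored to a different frame) is attached to a copy of this Series'
# frame as a temporary column, which aligns it with `self` by index; the
# final value is then computed row by row with
# F.when(cond, self).otherwise(other).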
kdf = self.to_frame() tmp_cond_col = verify_temp_column_name(kdf, "__tmp_cond_col__") tmp_other_col = verify_temp_column_name(kdf, "__tmp_other_col__") kdf[tmp_cond_col] = cond kdf[tmp_other_col] = other # above logic makes a Spark DataFrame looks like below: # +-----------------+---+----------------+-----------------+ # |__index_level_0__| 0|__tmp_cond_col__|__tmp_other_col__| # +-----------------+---+----------------+-----------------+ # | 0| 0| false| 100| # | 1| 1| false| 200| # | 3| 3| true| 400| # | 2| 2| true| 300| # | 4| 4| true| 500| # +-----------------+---+----------------+-----------------+ condition = ( F.when( kdf[tmp_cond_col].spark_column, kdf[self._internal.column_labels[0]].spark_column, ) .otherwise(kdf[tmp_other_col].spark_column) .alias(self._internal.data_spark_column_names[0]) ) internal = kdf._internal.with_new_columns( [condition], column_labels=self._internal.column_labels ) return first_series(DataFrame(internal)) else: if isinstance(other, Series): other = other.spark_column condition = ( F.when(cond.spark_column, self.spark_column) .otherwise(other) .alias(self._internal.data_spark_column_names[0]) ) return self._with_new_scol(condition) def mask(self, cond, other=np.nan): """ Replace values where the condition is True. Parameters ---------- cond : boolean Series Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, Series Entries where cond is True are replaced with corresponding value from other. Returns ------- Series Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> s1 = ks.Series([0, 1, 2, 3, 4]) >>> s2 = ks.Series([100, 200, 300, 400, 500]) >>> s1.mask(s1 > 0).sort_index() 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN Name: 0, dtype: float64 >>> s1.mask(s1 > 1, 10).sort_index() 0 0 1 1 2 10 3 10 4 10 Name: 0, dtype: int64 >>> s1.mask(s1 > 1, s1 + 100).sort_index() 0 0 1 1 2 102 3 103 4 104 Name: 0, dtype: int64 >>> s1.mask(s1 > 1, s2).sort_index() 0 0 1 1 2 300 3 400 4 500 Name: 0, dtype: int64 >>> reset_option("compute.ops_on_diff_frames") """ return self.where(~cond, other) def xs(self, key, level=None): """ Return cross-section from the Series. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. Returns ------- Series Cross-section from the original Series corresponding to the selected index levels. Examples -------- >>> midx = pd.MultiIndex([['a', 'b', 'c'], ... ['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], ... 
index=midx) >>> s a lama speed 45.0 weight 200.0 length 1.2 b cow speed 30.0 weight 250.0 length 1.5 c falcon speed 320.0 weight 1.0 length 0.3 Name: 0, dtype: float64 Get values at specified index >>> s.xs('a') lama speed 45.0 weight 200.0 length 1.2 Name: 0, dtype: float64 Get values at several indexes >>> s.xs(('a', 'lama')) speed 45.0 weight 200.0 length 1.2 Name: 0, dtype: float64 Get values at specified index and level >>> s.xs('lama', level=1) a speed 45.0 weight 200.0 length 1.2 Name: 0, dtype: float64 """ if not isinstance(key, tuple): key = (key,) if level is None: level = 0 cols = ( self._internal.index_spark_columns[:level] + self._internal.index_spark_columns[level + len(key) :] + [self._internal.spark_column_for(self._internal.column_labels[0])] ) rows = [self._internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)] sdf = self._internal.spark_frame.select(cols).where(reduce(lambda x, y: x & y, rows)) if len(self._internal._index_map) == len(key): # if spark_frame has one column and one data, return data only without frame pdf = sdf.limit(2).toPandas() length = len(pdf) if length == 1: return pdf[self.name].iloc[0] index_cols = [ col for col in sdf.columns if col not in self._internal.data_spark_column_names ] index_map_dict = dict(self._internal.index_map) internal = self._internal.copy( spark_frame=sdf, index_map=OrderedDict( (index_col, index_map_dict[index_col]) for index_col in index_cols ), ) return first_series(DataFrame(internal)) def pct_change(self, periods=1): """ Percentage change between the current and a prior element. .. note:: the current implementation of this API uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. Returns ------- Series Examples -------- >>> kser = ks.Series([90, 91, 85], index=[2, 4, 1]) >>> kser 2 90 4 91 1 85 Name: 0, dtype: int64 >>> kser.pct_change() 2 NaN 4 0.011111 1 -0.065934 Name: 0, dtype: float64 >>> kser.sort_index().pct_change() 1 NaN 2 0.058824 4 0.011111 Name: 0, dtype: float64 >>> kser.pct_change(periods=2) 2 NaN 4 NaN 1 -0.055556 Name: 0, dtype: float64 """ scol = self._internal.spark_column window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods) prev_row = F.lag(scol, periods).over(window) return self._with_new_scol((scol - prev_row) / prev_row) def combine_first(self, other): """ Combine Series values, choosing the calling Series's values first. Parameters ---------- other : Series The value(s) to be combined with the `Series`. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine : Perform elementwise operation on two Series using a given function. Notes ----- Result index will be the union of the two indexes. 
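For instance, combining series indexed by ``[0, 1]`` and ``[1, 2]`` yields a
result indexed by ``[0, 1, 2]``; a hedged sketch (output unverified):

>>> ks.Series([1.0, np.nan]).combine_first(
...     ks.Series([3.0, 4.0], index=[1, 2])).sort_index()  # doctest: +SKIP
0    1.0
1    3.0
2    4.0
Name: 0, dtype: float64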
Examples -------- >>> s1 = ks.Series([1, np.nan]) >>> s2 = ks.Series([3, 4]) >>> s1.combine_first(s2) 0 1.0 1 4.0 Name: 0, dtype: float64 """ if not isinstance(other, ks.Series): raise ValueError("`combine_first` only allows `Series` for parameter `other`") if self._kdf is other._kdf: this = self.name that = other.name combined = self._kdf else: this = "__this_{}".format(self.name) that = "__that_{}".format(other.name) with option_context("compute.ops_on_diff_frames", True): combined = combine_frames(self.to_frame(), other) sdf = combined._sdf # If `self` has missing value, use value of `other` cond = F.when(sdf[this].isNull(), sdf[that]).otherwise(sdf[this]) # If `self` and `other` come from same frame, the anchor should be kept if self._kdf is other._kdf: return self._with_new_scol(cond) index_scols = combined._internal.index_spark_columns sdf = sdf.select(*index_scols, cond.alias(self.name)).distinct() internal = InternalFrame( spark_frame=sdf, index_map=self._internal.index_map, column_labels=self._internal.column_labels, column_label_names=self._internal.column_label_names, ) return first_series(ks.DataFrame(internal)) def dot(self, other): """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each column of a DataFrame. It can also be called using `self @ other` in Python >= 3.5. .. note:: This API is slightly different from pandas when indexes from both are not aligned. To match pandas', it requires reading the whole data, for example, for counting. pandas raises an exception; however, Koalas just proceeds and performs the operation by permissively ignoring mismatches with NaN. >>> pdf1 = pd.Series([1, 2, 3], index=[0, 1, 2]) >>> pdf2 = pd.Series([1, 2, 3], index=[0, 1, 3]) >>> pdf1.dot(pdf2) # doctest: +SKIP ... ValueError: matrices are not aligned >>> kdf1 = ks.Series([1, 2, 3], index=[0, 1, 2]) >>> kdf2 = ks.Series([1, 2, 3], index=[0, 1, 3]) >>> kdf1.dot(kdf2) # doctest: +SKIP 5 Parameters ---------- other : Series, DataFrame. The other object to compute the dot product with its columns. Returns ------- scalar, Series Return the dot product of the Series and other if other is a Series, or the Series of the dot product of the Series and each column of other if other is a DataFrame. Notes ----- The Series and other have to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = ks.Series([0, 1, 2, 3]) >>> s.dot(s) 14 >>> s @ s 14 """ if isinstance(other, DataFrame): raise ValueError( "Series.dot() is currently not supported with DataFrame since " "it will cause expensive calculation that grows with the number " "of columns of the DataFrame" ) if self._kdf is not other._kdf: if len(self.index) != len(other.index): raise ValueError("matrices are not aligned") if isinstance(other, Series): result = (self * other).sum() return result def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def repeat(self, repeats: int) -> "Series": """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index.
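Notes
-----
The repetition is implemented by concatenating the underlying frame with
itself ``repeats`` times, so every repetition keeps the original index
labels; sorting the index groups the repeated elements together, e.g.
(a sketch, output unverified):

>>> ks.Series(['a', 'b']).repeat(3).sort_index()  # doctest: +SKIP
0    a
0    a
0    a
1    b
1    b
1    b
Name: 0, dtype: object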
Examples -------- >>> s = ks.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c Name: 0, dtype: object >>> s.repeat(2) 0 a 1 b 2 c 0 a 1 b 2 c Name: 0, dtype: object >>> ks.Series([1, 2, 3]).repeat(0) Series([], Name: 0, dtype: int64) """ if not isinstance(repeats, int): raise ValueError("`repeats` argument must be integer, but got {}".format(type(repeats))) elif repeats < 0: raise ValueError("negative dimensions are not allowed") kdf = self.to_frame() if repeats == 0: return first_series(DataFrame(kdf._internal.with_filter(F.lit(False)))) else: return first_series(ks.concat([kdf] * repeats)) def asof(self, where): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. If there is no good value, NaN is returned. .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing` which can be expensive. Parameters ---------- where : index or array-like of indices Returns ------- scalar or Series The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like Return scalar or Series Notes ----- Indices are assumed to be sorted. Raises if this is not the case. Examples -------- >>> s = ks.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 Name: 0, dtype: float64 A scalar `where`. >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]).sort_index() 5 NaN 20 2.0 Name: 0, dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 """ should_return_series = True if isinstance(self.index, ks.MultiIndex): raise ValueError("asof is not supported for a MultiIndex") if isinstance(where, (ks.Index, ks.Series, ks.DataFrame)): raise ValueError("where cannot be an Index, Series or a DataFrame") if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") if not is_list_like(where): should_return_series = False where = [where] sdf = self._internal._sdf index_scol = self._internal.index_spark_columns[0] cond = [F.max(F.when(index_scol <= index, self.spark_column)) for index in where] sdf = sdf.select(cond) if not should_return_series: result = sdf.head()[0] return result if result is not None else np.nan # The data is expected to be small so it's fine to transpose/use default index. with ks.option_context( "compute.default_index_type", "distributed", "compute.max_rows", None ): kdf = ks.DataFrame(sdf) kdf.columns = pd.Index(where) result_series = first_series(kdf.transpose()) result_series.name = self.name return result_series def mad(self): """ Return the mean absolute deviation of values. Examples -------- >>> s = ks.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 Name: 0, dtype: int64 >>> s.mad() 1.0 """ sdf = self._internal.spark_frame spark_column = self._internal.spark_column avg = unpack_scalar(sdf.select(F.avg(spark_column))) mad = unpack_scalar(sdf.select(F.avg(F.abs(spark_column - avg)))) return mad def unstack(self, level=-1): """ Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. The level involved will automatically get sorted. Notes ----- Unlike pandas, Koalas doesn't check whether an index is duplicated or not because the checking of duplicated index requires scanning whole data which can be quite expensive. 
Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. Returns ------- DataFrame Unstacked Series. Examples -------- >>> s = ks.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 Name: 0, dtype: int64 >>> s.unstack(level=-1).sort_index() a b one 1 2 two 3 4 >>> s.unstack(level=0).sort_index() one two a 1 3 b 2 4 """ if not isinstance(self.index, ks.MultiIndex): raise ValueError("Series.unstack only support for a MultiIndex") index_nlevels = self.index.nlevels if level > 0 and (level > index_nlevels - 1): raise IndexError( "Too many levels: Index has only {} levels, not {}".format(index_nlevels, level + 1) ) elif level < 0 and (level < -index_nlevels): raise IndexError( "Too many levels: Index has only {} levels, {} is not a valid level number".format( index_nlevels, level ) ) sdf = self._internal.spark_frame index_scol_names = self._internal.index_spark_column_names.copy() pivot_col = index_scol_names.pop(level) data_scol_name = self._internal.data_spark_column_names[0] sdf = sdf.groupby(index_scol_names).pivot(pivot_col).sum(data_scol_name) internal = InternalFrame( spark_frame=sdf, index_map=OrderedDict((index_scol_name, None) for index_scol_name in index_scol_names), ) return DataFrame(internal) def item(self): """ Return the first element of the underlying data as a Python scalar. Returns ------- scalar The first element of Series. Raises ------ ValueError If the data is not length-1. Examples -------- >>> kser = ks.Series([10]) >>> kser.item() 10 """ item_top_two = self[:2] if len(item_top_two) != 1: raise ValueError("can only convert an array of size 1 to a Python scalar") return item_top_two[0] def _cum(self, func, skipna, part_cols=()): # This is used to cummin, cummax, cumsum, etc. window = ( Window.orderBy(NATURAL_ORDER_COLUMN_NAME) .partitionBy(*part_cols) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) if skipna: # There is a behavior difference between pandas and PySpark. In case of cummax, # # Input: # A B # 0 2.0 1.0 # 1 5.0 NaN # 2 1.0 0.0 # 3 2.0 4.0 # 4 4.0 9.0 # # pandas: # A B # 0 2.0 1.0 # 1 5.0 NaN # 2 5.0 1.0 # 3 5.0 4.0 # 4 5.0 9.0 # # PySpark: # A B # 0 2.0 1.0 # 1 5.0 1.0 # 2 5.0 1.0 # 3 5.0 4.0 # 4 5.0 9.0 scol = F.when( # Manually sets nulls given the column defined above. self.spark_column.isNull(), F.lit(None), ).otherwise(func(self.spark_column).over(window)) else: # Here, we use two Windows. # One for real data. # The other one for setting nulls after the first null it meets. # # There is a behavior difference between pandas and PySpark. In case of cummax, # # Input: # A B # 0 2.0 1.0 # 1 5.0 NaN # 2 1.0 0.0 # 3 2.0 4.0 # 4 4.0 9.0 # # pandas: # A B # 0 2.0 1.0 # 1 5.0 NaN # 2 5.0 NaN # 3 5.0 NaN # 4 5.0 NaN # # PySpark: # A B # 0 2.0 1.0 # 1 5.0 1.0 # 2 5.0 1.0 # 3 5.0 4.0 # 4 5.0 9.0 scol = F.when( # By going through with max, it sets True after the first time it meets null. F.max(self.spark_column.isNull()).over(window), # Manually sets nulls given the column defined above. 
F.lit(None), ).otherwise(func(self.spark_column).over(window)) return self._with_new_scol(scol).rename(self.name) def _cumprod(self, skipna, part_cols=()): from pyspark.sql.functions import pandas_udf def cumprod(scol): @pandas_udf(returnType=self.spark_type) def negative_check(s): assert len(s) == 0 or ((s > 0) | (s.isnull())).all(), ( "values should be bigger than 0: %s" % s ) return s return F.sum(F.log(negative_check(scol))) kser = self._cum(cumprod, skipna, part_cols) return kser._with_new_scol(F.exp(kser.spark_column)).rename(self.name) # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- dt = CachedAccessor("dt", DatetimeMethods) str = CachedAccessor("str", StringMethods) # ---------------------------------------------------------------------- def _apply_series_op(self, op): return op(self) def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=None): """ Applies sfun to the column and returns a scalar Parameters ---------- sfun : the stats function to be used for aggregation name : original pandas API name. axis : used only for sanity check because series only support index axis. numeric_only : not used by this implementation, but passed down by stats functions """ from inspect import signature axis = validate_axis(axis) if axis == 1: raise ValueError("Series does not support columns axis.") num_args = len(signature(sfun).parameters) col_sdf = self.spark_column col_type = self.spark_type if isinstance(col_type, BooleanType) and sfun.__name__ not in ("min", "max"): # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast("integer") if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) return unpack_scalar(self._internal._sdf.select(col_sdf)) def __len__(self): return len(self.to_dataframe()) def __getitem__(self, key): try: if (isinstance(key, slice) and any(type(n) == int for n in [key.start, key.stop])) or ( type(key) == int and not isinstance(self.index.spark_type, (IntegerType, LongType)) ): # Seems like pandas Series always uses int as positional search when slicing # with ints, searches based on index values when the value is int. return self.iloc[key] return self.loc[key] except SparkPandasIndexingError: raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(key), len(self._internal.index_map) ) ) def __getattr__(self, item: str_type) -> Any: if item.startswith("__"): raise AttributeError(item) if hasattr(MissingPandasLikeSeries, item): property_or_func = getattr(MissingPandasLikeSeries, item) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) raise AttributeError("'Series' object has no attribute '{}'".format(item)) def _to_internal_pandas(self): """ Return a pandas Series directly from _internal to avoid overhead of copy. This method is for internal use only. 
""" return first_series(self._internal.to_pandas_frame) def __repr__(self): max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_string(name=self.name, dtype=self.dtype) pser = self.head(max_display_count + 1)._to_internal_pandas() pser_length = len(pser) pser = pser.iloc[:max_display_count] if pser_length > max_display_count: repr_string = pser.to_string(length=True) rest, prev_footer = repr_string.rsplit("\n", 1) match = REPR_PATTERN.search(prev_footer) if match is not None: length = match.group("length") name = str(self.dtype.name) footer = "\nName: {name}, dtype: {dtype}\nShowing only the first {length}".format( length=length, name=self.name, dtype=pprint_thing(name) ) return rest + footer return pser.to_string(name=self.name, dtype=self.dtype) def __dir__(self): if not isinstance(self.spark_type, StructType): fields = [] else: fields = [f for f in self.spark_type.fieldNames() if " " not in f] return super(Series, self).__dir__() + fields def __iter__(self): return MissingPandasLikeSeries.__iter__(self) def _equals(self, other: "Series") -> bool: return self.spark_column._jc.equals(other.spark_column._jc) def unpack_scalar(sdf): """ Takes a dataframe that is supposed to contain a single row with a single scalar value, and returns this value. """ l = sdf.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row.asDict().values()) assert len(l2) == 1, (row, l2) return l2[0] def first_series(df): """ Takes a DataFrame and returns the first column of the DataFrame as a Series """ assert isinstance(df, (DataFrame, pd.DataFrame)), type(df) if isinstance(df, DataFrame): return df._kser_for(df._internal.column_labels[0]) else: return df[df.columns[0]]
1
15,233
I think you can just simply fix this line to `self[:2].to_pandas()`
databricks-koalas
py
@@ -1229,6 +1229,14 @@ class WebDriver {
     if (target && cdpTargets.indexOf(target.toLowerCase()) === -1) {
       throw new error.InvalidArgumentError('invalid target value')
     }
+
+    if (debuggerAddress.match(/\/se\/cdp/)) {
+      if (debuggerAddress.match("ws:\/\/", "http:\/\/")) {
+        return debuggerAddress.replace("ws:\/\/", "http:\/\/")
+      }
+      return debuggerAddress
+    }
+
     const path = '/json/version'
     let request = new http.Request('GET', path)
     let client = new http.HttpClient('http://' + debuggerAddress)
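One reading note on the hunk above, hedged since only the diff is recorded here: `String.prototype.match` takes a single pattern, so the second argument in `match("ws:\/\/", "http:\/\/")` is ignored, and because `"ws:\/\/"` in a string literal is just `"ws://"`, the inner condition simply tests whether the address contains `ws://`. A small sketch of what the branch does, using a hypothetical address:

```js
// Hypothetical debugger address; only /se/cdp endpoints take the new branch.
const debuggerAddress = 'ws://localhost:4444/session/1234/se/cdp'

if (debuggerAddress.match(/\/se\/cdp/)) {
  // The second argument passed to match() in the patch has no effect.
  if (debuggerAddress.match('ws://')) {
    console.log(debuggerAddress.replace('ws://', 'http://'))
    // -> http://localhost:4444/session/1234/se/cdp
  }
}
```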
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview The heart of the WebDriver JavaScript API. */ 'use strict' const by = require('./by') const { RelativeBy } = require('./by') const command = require('./command') const error = require('./error') const input = require('./input') const logging = require('./logging') const promise = require('./promise') const Symbols = require('./symbols') const cdpTargets = ['page', 'browser'] const cdp = require('../devtools/CDPConnection') const WebSocket = require('ws') const http = require('../http/index') const fs = require('fs') const { Capabilities } = require('./capabilities') const path = require('path') const { NoSuchElementError } = require('./error') // Capability names that are defined in the W3C spec. const W3C_CAPABILITY_NAMES = new Set([ 'acceptInsecureCerts', 'browserName', 'browserVersion', 'platformName', 'pageLoadStrategy', 'proxy', 'setWindowRect', 'timeouts', 'strictFileInteractability', 'unhandledPromptBehavior', ]) /** * Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait * command}. * * @template OUT */ class Condition { /** * @param {string} message A descriptive error message. Should complete the * sentence "Waiting [...]" * @param {function(!WebDriver): OUT} fn The condition function to * evaluate on each iteration of the wait loop. */ constructor(message, fn) { /** @private {string} */ this.description_ = 'Waiting ' + message /** @type {function(!WebDriver): OUT} */ this.fn = fn } /** @return {string} A description of this condition. */ description() { return this.description_ } } /** * Defines a condition that will result in a {@link WebElement}. * * @extends {Condition<!(WebElement|IThenable<!WebElement>)>} */ class WebElementCondition extends Condition { /** * @param {string} message A descriptive error message. Should complete the * sentence "Waiting [...]" * @param {function(!WebDriver): !(WebElement|IThenable<!WebElement>)} * fn The condition function to evaluate on each iteration of the wait * loop. */ constructor(message, fn) { super(message, fn) } } ////////////////////////////////////////////////////////////////////////////// // // WebDriver // ////////////////////////////////////////////////////////////////////////////// /** * Translates a command to its wire-protocol representation before passing it * to the given `executor` for execution. * @param {!command.Executor} executor The executor to use. * @param {!command.Command} command The command to execute. * @return {!Promise} A promise that will resolve with the command response. 
*/ function executeCommand(executor, command) { return toWireValue(command.getParameters()).then(function (parameters) { command.setParameters(parameters) return executor.execute(command) }) } /** * Converts an object to its JSON representation in the WebDriver wire protocol. * When converting values of type object, the following steps will be taken: * <ol> * <li>if the object is a WebElement, the return value will be the element's * server ID * <li>if the object defines a {@link Symbols.serialize} method, this algorithm * will be recursively applied to the object's serialized representation * <li>if the object provides a "toJSON" function, this algorithm will * recursively be applied to the result of that function * <li>otherwise, the value of each key will be recursively converted according * to the rules above. * </ol> * * @param {*} obj The object to convert. * @return {!Promise<?>} A promise that will resolve to the input value's JSON * representation. */ async function toWireValue(obj) { let value = await Promise.resolve(obj) if (value === void 0 || value === null) { return value } if ( typeof value === 'boolean' || typeof value === 'number' || typeof value === 'string' ) { return value } if (Array.isArray(value)) { return convertKeys(value) } if (typeof value === 'function') { return '' + value } if (typeof value[Symbols.serialize] === 'function') { return toWireValue(value[Symbols.serialize]()) } else if (typeof value.toJSON === 'function') { return toWireValue(value.toJSON()) } return convertKeys(value) } async function convertKeys(obj) { const isArray = Array.isArray(obj) const numKeys = isArray ? obj.length : Object.keys(obj).length const ret = isArray ? new Array(numKeys) : {} if (!numKeys) { return ret } async function forEachKey(obj, fn) { if (Array.isArray(obj)) { for (let i = 0, n = obj.length; i < n; i++) { await fn(obj[i], i) } } else { for (let key in obj) { await fn(obj[key], key) } } } await forEachKey(obj, async function (value, key) { ret[key] = await toWireValue(value) }) return ret } /** * Converts a value from its JSON representation according to the WebDriver wire * protocol. Any JSON object that defines a WebElement ID will be decoded to a * {@link WebElement} object. All other values will be passed through as is. * * @param {!WebDriver} driver The driver to use as the parent of any unwrapped * {@link WebElement} values. * @param {*} value The value to convert. * @return {*} The converted value. */ function fromWireValue(driver, value) { if (Array.isArray(value)) { value = value.map((v) => fromWireValue(driver, v)) } else if (WebElement.isId(value)) { let id = WebElement.extractId(value) value = new WebElement(driver, id) } else if (value && typeof value === 'object') { let result = {} for (let key in value) { if (Object.prototype.hasOwnProperty.call(value, key)) { result[key] = fromWireValue(driver, value[key]) } } value = result } return value } /** * Resolves a wait message from either a function or a string. * @param {(string|Function)=} message An optional message to use if the wait times out. * @return {string} The resolved message */ function resolveWaitMessage(message) { return message ? `${typeof message === 'function' ? message() : message}\n` : '' } /** * Structural interface for a WebDriver client. * * @record */ class IWebDriver { /** * Executes the provided {@link command.Command} using this driver's * {@link command.Executor}. * * @param {!command.Command} command The command to schedule. 
* @return {!Promise<T>} A promise that will be resolved with the command * result. * @template T */ execute(command) { } // eslint-disable-line /** * Sets the {@linkplain input.FileDetector file detector} that should be * used with this instance. * @param {input.FileDetector} detector The detector to use or `null`. */ setFileDetector(detector) { } // eslint-disable-line /** * @return {!command.Executor} The command executor used by this instance. */ getExecutor() { } /** * @return {!Promise<!Session>} A promise for this client's session. */ getSession() { } /** * @return {!Promise<!Capabilities>} A promise that will resolve with * the this instance's capabilities. */ getCapabilities() { } /** * Terminates the browser session. After calling quit, this instance will be * invalidated and may no longer be used to issue commands against the * browser. * * @return {!Promise<void>} A promise that will be resolved when the * command has completed. */ quit() { } /** * Creates a new action sequence using this driver. The sequence will not be * submitted for execution until * {@link ./input.Actions#perform Actions.perform()} is called. * * @param {{async: (boolean|undefined), * bridge: (boolean|undefined)}=} options Configuration options for * the action sequence (see {@link ./input.Actions Actions} documentation * for details). * @return {!input.Actions} A new action sequence for this instance. */ actions(options) { } // eslint-disable-line /** * Executes a snippet of JavaScript in the context of the currently selected * frame or window. The script fragment will be executed as the body of an * anonymous function. If the script is provided as a function object, that * function will be converted to a string for injection into the target * window. * * Any arguments provided in addition to the script will be included as script * arguments and may be referenced using the `arguments` object. Arguments may * be a boolean, number, string, or {@linkplain WebElement}. Arrays and * objects may also be used as script arguments as long as each item adheres * to the types previously mentioned. * * The script may refer to any variables accessible from the current window. * Furthermore, the script will execute in the window's context, thus * `document` may be used to refer to the current document. Any local * variables will not be available once the script has finished executing, * though global variables will persist. * * If the script has a return value (i.e. if the script contains a return * statement), then the following steps will be taken for resolving this * functions return value: * * - For a HTML element, the value will resolve to a {@linkplain WebElement} * - Null and undefined return values will resolve to null</li> * - Booleans, numbers, and strings will resolve as is</li> * - Functions will resolve to their string representation</li> * - For arrays and objects, each member item will be converted according to * the rules above * * @param {!(string|Function)} script The script to execute. * @param {...*} args The arguments to pass to the script. * @return {!IThenable<T>} A promise that will resolve to the * scripts return value. * @template T */ executeScript(script, ...args) { } // eslint-disable-line /** * Executes a snippet of asynchronous JavaScript in the context of the * currently selected frame or window. The script fragment will be executed as * the body of an anonymous function. 
If the script is provided as a function * object, that function will be converted to a string for injection into the * target window. * * Any arguments provided in addition to the script will be included as script * arguments and may be referenced using the `arguments` object. Arguments may * be a boolean, number, string, or {@linkplain WebElement}. Arrays and * objects may also be used as script arguments as long as each item adheres * to the types previously mentioned. * * Unlike executing synchronous JavaScript with {@link #executeScript}, * scripts executed with this function must explicitly signal they are * finished by invoking the provided callback. This callback will always be * injected into the executed function as the last argument, and thus may be * referenced with `arguments[arguments.length - 1]`. The following steps * will be taken for resolving this functions return value against the first * argument to the script's callback function: * * - For a HTML element, the value will resolve to a {@link WebElement} * - Null and undefined return values will resolve to null * - Booleans, numbers, and strings will resolve as is * - Functions will resolve to their string representation * - For arrays and objects, each member item will be converted according to * the rules above * * __Example #1:__ Performing a sleep that is synchronized with the currently * selected window: * * var start = new Date().getTime(); * driver.executeAsyncScript( * 'window.setTimeout(arguments[arguments.length - 1], 500);'). * then(function() { * console.log( * 'Elapsed time: ' + (new Date().getTime() - start) + ' ms'); * }); * * __Example #2:__ Synchronizing a test with an AJAX application: * * var button = driver.findElement(By.id('compose-button')); * button.click(); * driver.executeAsyncScript( * 'var callback = arguments[arguments.length - 1];' + * 'mailClient.getComposeWindowWidget().onload(callback);'); * driver.switchTo().frame('composeWidget'); * driver.findElement(By.id('to')).sendKeys('[email protected]'); * * __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In * this example, the inject script is specified with a function literal. When * using this format, the function is converted to a string for injection, so * it should not reference any symbols not defined in the scope of the page * under test. * * driver.executeAsyncScript(function() { * var callback = arguments[arguments.length - 1]; * var xhr = new XMLHttpRequest(); * xhr.open("GET", "/resource/data.json", true); * xhr.onreadystatechange = function() { * if (xhr.readyState == 4) { * callback(xhr.responseText); * } * }; * xhr.send(''); * }).then(function(str) { * console.log(JSON.parse(str)['food']); * }); * * @param {!(string|Function)} script The script to execute. * @param {...*} args The arguments to pass to the script. * @return {!IThenable<T>} A promise that will resolve to the scripts return * value. * @template T */ executeAsyncScript(script, ...args) { } // eslint-disable-line /** * Waits for a condition to evaluate to a "truthy" value. The condition may be * specified by a {@link Condition}, as a custom function, or as any * promise-like thenable. * * For a {@link Condition} or function, the wait will repeatedly * evaluate the condition until it returns a truthy value. If any errors occur * while evaluating the condition, they will be allowed to propagate. 
In the * event a condition returns a {@linkplain Promise}, the polling loop will * wait for it to be resolved and use the resolved value for whether the * condition has been satisfied. The resolution time for a promise is always * factored into whether a wait has timed out. * * If the provided condition is a {@link WebElementCondition}, then * the wait will return a {@link WebElementPromise} that will resolve to the * element that satisfied the condition. * * _Example:_ waiting up to 10 seconds for an element to be present on the * page. * * async function example() { * let button = * await driver.wait(until.elementLocated(By.id('foo')), 10000); * await button.click(); * } * * @param {!(IThenable<T>| * Condition<T>| * function(!WebDriver): T)} condition The condition to * wait on, defined as a promise, condition object, or a function to * evaluate as a condition. * @param {number=} timeout The duration in milliseconds, how long to wait * for the condition to be true. * @param {(string|Function)=} message An optional message to use if the wait times out. * @param {number=} pollTimeout The duration in milliseconds, how long to * wait between polling the condition. * @return {!(IThenable<T>|WebElementPromise)} A promise that will be * resolved with the first truthy value returned by the condition * function, or rejected if the condition times out. If the input * input condition is an instance of a {@link WebElementCondition}, * the returned value will be a {@link WebElementPromise}. * @throws {TypeError} if the provided `condition` is not a valid type. * @template T */ wait( condition, // eslint-disable-line timeout = undefined, // eslint-disable-line message = undefined, // eslint-disable-line pollTimeout = undefined // eslint-disable-line ) { } /** * Makes the driver sleep for the given amount of time. * * @param {number} ms The amount of time, in milliseconds, to sleep. * @return {!Promise<void>} A promise that will be resolved when the sleep has * finished. */ sleep(ms) { } // eslint-disable-line /** * Retrieves the current window handle. * * @return {!Promise<string>} A promise that will be resolved with the current * window handle. */ getWindowHandle() { } /** * Retrieves a list of all available window handles. * * @return {!Promise<!Array<string>>} A promise that will be resolved with an * array of window handles. */ getAllWindowHandles() { } /** * Retrieves the current page's source. The returned source is a representation * of the underlying DOM: do not expect it to be formatted or escaped in the * same way as the raw response sent from the web server. * * @return {!Promise<string>} A promise that will be resolved with the current * page source. */ getPageSource() { } /** * Closes the current window. * * @return {!Promise<void>} A promise that will be resolved when this command * has completed. */ close() { } /** * Navigates to the given URL. * * @param {string} url The fully qualified URL to open. * @return {!Promise<void>} A promise that will be resolved when the document * has finished loading. */ get(url) { } // eslint-disable-line /** * Retrieves the URL for the current page. * * @return {!Promise<string>} A promise that will be resolved with the * current URL. */ getCurrentUrl() { } /** * Retrieves the current page title. * * @return {!Promise<string>} A promise that will be resolved with the current * page's title. */ getTitle() { } /** * Locates an element on the page. If the element cannot be found, a * {@link error.NoSuchElementError} will be returned by the driver. 
* * This function should not be used to test whether an element is present on * the page. Rather, you should use {@link #findElements}: * * driver.findElements(By.id('foo')) * .then(found => console.log('Element found? %s', !!found.length)); * * The search criteria for an element may be defined using one of the * factories in the {@link webdriver.By} namespace, or as a short-hand * {@link webdriver.By.Hash} object. For example, the following two statements * are equivalent: * * var e1 = driver.findElement(By.id('foo')); * var e2 = driver.findElement({id:'foo'}); * * You may also provide a custom locator function, which takes as input this * instance and returns a {@link WebElement}, or a promise that will resolve * to a WebElement. If the returned promise resolves to an array of * WebElements, WebDriver will use the first element. For example, to find the * first visible link on a page, you could write: * * var link = driver.findElement(firstVisibleLink); * * function firstVisibleLink(driver) { * var links = driver.findElements(By.tagName('a')); * return promise.filter(links, function(link) { * return link.isDisplayed(); * }); * } * * @param {!(by.By|Function)} locator The locator to use. * @return {!WebElementPromise} A WebElement that can be used to issue * commands against the located element. If the element is not found, the * element will be invalidated and all scheduled commands aborted. */ findElement(locator) { } // eslint-disable-line /** * Search for multiple elements on the page. Refer to the documentation on * {@link #findElement(by)} for information on element locator strategies. * * @param {!(by.By|Function)} locator The locator to use. * @return {!Promise<!Array<!WebElement>>} A promise that will resolve to an * array of WebElements. */ findElements(locator) { } // eslint-disable-line /** * Takes a screenshot of the current page. The driver makes a best effort to * return a screenshot of the following, in order of preference: * * 1. Entire page * 2. Current window * 3. Visible portion of the current frame * 4. The entire display containing the browser * * @return {!Promise<string>} A promise that will be resolved to the * screenshot as a base-64 encoded PNG. */ takeScreenshot() { } /** * @return {!Options} The options interface for this instance. */ manage() { } /** * @return {!Navigation} The navigation interface for this instance. */ navigate() { } /** * @return {!TargetLocator} The target locator interface for this * instance. */ switchTo() { } /** * * Takes a PDF of the current page. The driver makes a best effort to * return a PDF based on the provided parameters. * * @param {{orientation: (string|undefined), * scale: (number|undefined), * background: (boolean|undefined) * width: (number|undefined) * height: (number|undefined) * top: (number|undefined) * bottom: (number|undefined) * left: (number|undefined) * right: (number|undefined) * shrinkToFit: (boolean|undefined) * pageRanges: (<Array>|undefined)}} options. */ printPage(options) { } // eslint-disable-line } /** * @param {!Capabilities} capabilities A capabilities object. * @return {!Capabilities} A copy of the parameter capabilities, omitting * capability names that are not valid W3C names. */ function filterNonW3CCaps(capabilities) { let newCaps = new Capabilities(capabilities) for (let k of newCaps.keys()) { // Any key containing a colon is a vendor-prefixed capability. 
if (!(W3C_CAPABILITY_NAMES.has(k) || k.indexOf(':') >= 0)) { newCaps.delete(k) } } return newCaps } /** * Each WebDriver instance provides automated control over a browser session. * * @implements {IWebDriver} */ class WebDriver { /** * @param {!(./session.Session|IThenable<!./session.Session>)} session Either * a known session or a promise that will be resolved to a session. * @param {!command.Executor} executor The executor to use when sending * commands to the browser. * @param {(function(this: void): ?)=} onQuit A function to call, if any, * when the session is terminated. */ constructor(session, executor, onQuit = undefined) { /** @private {!Promise<!Session>} */ this.session_ = Promise.resolve(session) // If session is a rejected promise, add a no-op rejection handler. // This effectively hides setup errors until users attempt to interact // with the session. this.session_.catch(function () { }) /** @private {!command.Executor} */ this.executor_ = executor /** @private {input.FileDetector} */ this.fileDetector_ = null /** @private @const {(function(this: void): ?|undefined)} */ this.onQuit_ = onQuit } /** * Creates a new WebDriver session. * * This function will always return a WebDriver instance. If there is an error * creating the session, such as the aforementioned SessionNotCreatedError, * the driver will have a rejected {@linkplain #getSession session} promise. * This rejection will propagate through any subsequent commands scheduled * on the returned WebDriver instance. * * let required = Capabilities.firefox(); * let driver = WebDriver.createSession(executor, {required}); * * // If the createSession operation failed, then this command will also * // also fail, propagating the creation failure. * driver.get('http://www.google.com').catch(e => console.log(e)); * * @param {!command.Executor} executor The executor to create the new session * with. * @param {!Capabilities} capabilities The desired capabilities for the new * session. * @param {(function(this: void): ?)=} onQuit A callback to invoke when * the newly created session is terminated. This should be used to clean * up any resources associated with the session. * @return {!WebDriver} The driver for the newly created session. */ static createSession(executor, capabilities, onQuit = undefined) { let cmd = new command.Command(command.Name.NEW_SESSION) // For OSS remote ends. cmd.setParameter('desiredCapabilities', capabilities) // For W3C remote ends. 
cmd.setParameter('capabilities', { alwaysMatch: filterNonW3CCaps(capabilities), }) let session = executeCommand(executor, cmd) if (typeof onQuit === 'function') { session = session.catch((err) => { return Promise.resolve(onQuit.call(void 0)).then((_) => { throw err }) }) } return new this(session, executor, onQuit) } /** @override */ async execute(command) { command.setParameter('sessionId', this.session_) let parameters = await toWireValue(command.getParameters()) command.setParameters(parameters) let value = await this.executor_.execute(command) return fromWireValue(this, value) } /** @override */ setFileDetector(detector) { this.fileDetector_ = detector } /** @override */ getExecutor() { return this.executor_ } /** @override */ getSession() { return this.session_ } /** @override */ getCapabilities() { return this.session_.then((s) => s.getCapabilities()) } /** @override */ quit() { let result = this.execute(new command.Command(command.Name.QUIT)) // Delete our session ID when the quit command finishes; this will allow us // to throw an error when attempting to use a driver post-quit. return promise.finally(result, () => { this.session_ = Promise.reject( new error.NoSuchSessionError( 'This driver instance does not have a valid session ID ' + '(did you call WebDriver.quit()?) and may no longer be used.' ) ) // Only want the session rejection to bubble if accessed. this.session_.catch(function () { }) if (this.onQuit_) { return this.onQuit_.call(void 0) } }) } /** @override */ actions(options) { return new input.Actions(this, options || undefined) } /** @override */ executeScript(script, ...args) { if (typeof script === 'function') { script = 'return (' + script + ').apply(null, arguments);' } return this.execute( new command.Command(command.Name.EXECUTE_SCRIPT) .setParameter('script', script) .setParameter('args', args) ) } /** @override */ executeAsyncScript(script, ...args) { if (typeof script === 'function') { script = 'return (' + script + ').apply(null, arguments);' } return this.execute( new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT) .setParameter('script', script) .setParameter('args', args) ) } /** @override */ wait(condition, timeout = 0, message = undefined, pollTimeout = 200) { if (typeof timeout !== 'number' || timeout < 0) { throw TypeError('timeout must be a number >= 0: ' + timeout) } if (typeof pollTimeout !== 'number' || pollTimeout < 0) { throw TypeError('pollTimeout must be a number >= 0: ' + pollTimeout) } if (promise.isPromise(condition)) { return new Promise((resolve, reject) => { if (!timeout) { resolve(condition) return } let start = Date.now() let timer = setTimeout(function () { timer = null try { let timeoutMessage = resolveWaitMessage(message) reject( new error.TimeoutError( `${timeoutMessage}Timed out waiting for promise to resolve after ${Date.now() - start }ms` ) ) } catch (ex) { reject( new error.TimeoutError( `${ex.message }\nTimed out waiting for promise to resolve after ${Date.now() - start }ms` ) ) } }, timeout) const clearTimer = () => timer && clearTimeout(timer) /** @type {!IThenable} */ condition.then( function (value) { clearTimer() resolve(value) }, function (error) { clearTimer() reject(error) } ) }) } let fn = /** @type {!Function} */ (condition) if (condition instanceof Condition) { message = message || condition.description() fn = condition.fn } if (typeof fn !== 'function') { throw TypeError( 'Wait condition must be a promise-like object, function, or a ' + 'Condition object' ) } const driver = this function evaluateCondition() { 
return new Promise((resolve, reject) => { try { resolve(fn(driver)) } catch (ex) { reject(ex) } }) } let result = new Promise((resolve, reject) => { const startTime = Date.now() const pollCondition = async () => { evaluateCondition().then(function (value) { const elapsed = Date.now() - startTime if (value) { resolve(value) } else if (timeout && elapsed >= timeout) { try { let timeoutMessage = resolveWaitMessage(message) reject( new error.TimeoutError( `${timeoutMessage}Wait timed out after ${elapsed}ms` ) ) } catch (ex) { reject( new error.TimeoutError( `${ex.message}\nWait timed out after ${elapsed}ms` ) ) } } else { setTimeout(pollCondition, pollTimeout) } }, reject) } pollCondition() }) if (condition instanceof WebElementCondition) { result = new WebElementPromise( this, result.then(function (value) { if (!(value instanceof WebElement)) { throw TypeError( 'WebElementCondition did not resolve to a WebElement: ' + Object.prototype.toString.call(value) ) } return value }) ) } return result } /** @override */ sleep(ms) { return new Promise((resolve) => setTimeout(resolve, ms)) } /** @override */ getWindowHandle() { return this.execute( new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE) ) } /** @override */ getAllWindowHandles() { return this.execute(new command.Command(command.Name.GET_WINDOW_HANDLES)) } /** @override */ getPageSource() { return this.execute(new command.Command(command.Name.GET_PAGE_SOURCE)) } /** @override */ close() { return this.execute(new command.Command(command.Name.CLOSE)) } /** @override */ get(url) { return this.navigate().to(url) } /** @override */ getCurrentUrl() { return this.execute(new command.Command(command.Name.GET_CURRENT_URL)) } /** @override */ getTitle() { return this.execute(new command.Command(command.Name.GET_TITLE)) } /** @override */ findElement(locator) { let id let cmd = null if (locator instanceof RelativeBy) { cmd = new command.Command( command.Name.FIND_ELEMENTS_RELATIVE ).setParameter('args', locator.marshall()) } else { locator = by.checkedLocator(locator) } if (typeof locator === 'function') { id = this.findElementInternal_(locator, this) return new WebElementPromise(this, id) } else if (cmd === null) { cmd = new command.Command(command.Name.FIND_ELEMENT) .setParameter('using', locator.using) .setParameter('value', locator.value) } id = this.execute(cmd) if (locator instanceof RelativeBy) { return this.normalize_(id) } else { return new WebElementPromise(this, id) } } /** * @param {!Function} webElementPromise The webElement in unresolved state * @return {!Promise<!WebElement>} First single WebElement from array of resolved promises */ async normalize_(webElementPromise) { let result = await webElementPromise if (result.length === 0) { throw new NoSuchElementError( 'Cannot locate an element with provided parameters' ) } else { return result[0] } } /** * @param {!Function} locatorFn The locator function to use. * @param {!(WebDriver|WebElement)} context The search context. * @return {!Promise<!WebElement>} A promise that will resolve to a list of * WebElements. 
* @private */ async findElementInternal_(locatorFn, context) { let result = await locatorFn(context) if (Array.isArray(result)) { result = result[0] } if (!(result instanceof WebElement)) { throw new TypeError('Custom locator did not return a WebElement') } return result } /** @override */ async findElements(locator) { let cmd = null if (locator instanceof RelativeBy) { cmd = new command.Command( command.Name.FIND_ELEMENTS_RELATIVE ).setParameter('args', locator.marshall()) } else { locator = by.checkedLocator(locator) } if (typeof locator === 'function') { return this.findElementsInternal_(locator, this) } else if (cmd === null) { cmd = new command.Command(command.Name.FIND_ELEMENTS) .setParameter('using', locator.using) .setParameter('value', locator.value) } try { let res = await this.execute(cmd) return Array.isArray(res) ? res : [] } catch (ex) { if (ex instanceof error.NoSuchElementError) { return [] } throw ex } } /** * @param {!Function} locatorFn The locator function to use. * @param {!(WebDriver|WebElement)} context The search context. * @return {!Promise<!Array<!WebElement>>} A promise that will resolve to an * array of WebElements. * @private */ async findElementsInternal_(locatorFn, context) { const result = await locatorFn(context) if (result instanceof WebElement) { return [result] } if (!Array.isArray(result)) { return [] } return result.filter(function (item) { return item instanceof WebElement }) } /** @override */ takeScreenshot() { return this.execute(new command.Command(command.Name.SCREENSHOT)) } /** @override */ manage() { return new Options(this) } /** @override */ navigate() { return new Navigation(this) } /** @override */ switchTo() { return new TargetLocator(this) } validatePrintPageParams(keys, object) { let page = {} let margin = {} let data Object.keys(keys).forEach(function (key) { data = keys[key] let obj = { orientation: function () { object.orientation = data }, scale: function () { object.scale = data }, background: function () { object.background = data }, width: function () { page.width = data object.page = page }, height: function () { page.height = data object.page = page }, top: function () { margin.top = data object.margin = margin }, left: function () { margin.left = data object.margin = margin }, bottom: function () { margin.bottom = data object.margin = margin }, right: function () { margin.right = data object.margin = margin }, shrinkToFit: function () { object.shrinkToFit = data }, pageRanges: function () { object.pageRanges = data }, } if (!Object.prototype.hasOwnProperty.call(obj, key)) { throw new error.InvalidArgumentError(`Invalid Argument '${key}'`) } else { obj[key]() } }) return object } /** @override */ printPage(options = {}) { let keys = options let params = {} let resultObj let self = this resultObj = self.validatePrintPageParams(keys, params) return this.execute( new command.Command(command.Name.PRINT_PAGE).setParameters(resultObj) ) } /** * Creates a new WebSocket connection. * @return {!Promise<resolved>} A new CDP instance. 
*/ async createCDPConnection(target) { const caps = await this.getCapabilities() const seCdp = caps['map_'].get('se:cdp') const vendorInfo = caps['map_'].get(this.VENDOR_COMMAND_PREFIX + ':chromeOptions') || caps['map_'].get(this.VENDOR_CAPABILITY_PREFIX + ':edgeOptions') || caps['map_'].get('moz:debuggerAddress') || new Map() const debuggerUrl = seCdp || vendorInfo['debuggerAddress'] || vendorInfo this._wsUrl = await this.getWsUrl(debuggerUrl, target) return new Promise((resolve, reject) => { try { this._wsConnection = new WebSocket(this._wsUrl) } catch (err) { reject(err) return } this._wsConnection.on('open', () => { this._cdpConnection = new cdp.CdpConnection(this._wsConnection) resolve(this._cdpConnection) }) this._wsConnection.on('error', (error) => { reject(error) }) }) } /** * Retrieves 'webSocketDebuggerUrl' by sending a http request using debugger address * @param {string} debuggerAddress * @param {string} target * @return {string} Returns parsed webSocketDebuggerUrl obtained from the http request */ async getWsUrl(debuggerAddress, target) { if (target && cdpTargets.indexOf(target.toLowerCase()) === -1) { throw new error.InvalidArgumentError('invalid target value') } const path = '/json/version' let request = new http.Request('GET', path) let client = new http.HttpClient('http://' + debuggerAddress) let response = await client.send(request) return JSON.parse(response.body)['webSocketDebuggerUrl'] } /** * Sets a listener for Fetch.authRequired event from CDP * If event is triggered, it enter username and password * and allows the test to move forward * @param {string} username * @param {string} password * @param connection CDP Connection */ async register(username, password, connection) { this._wsConnection.on('message', (message) => { const params = JSON.parse(message) if (params.method === 'Fetch.authRequired') { const requestParams = params['params'] connection.execute( 'Fetch.continueWithAuth', this.getRandomNumber(1, 10), { requestId: requestParams['requestId'], authChallengeResponse: { response: 'ProvideCredentials', username: username, password: password, }, } ) } else if (params.method === 'Fetch.requestPaused') { const requestPausedParams = params['params'] connection.execute( 'Fetch.continueRequest', this.getRandomNumber(1, 10), { requestId: requestPausedParams['requestId'], } ) } }) await connection.execute( 'Fetch.enable', 1, { handleAuthRequests: true, }, null ) await connection.execute( 'Network.setCacheDisabled', this.getRandomNumber(1, 10), { cacheDisabled: true, }, null ) } /** * Handle Network interception requests * @param connection WebSocket connection to the browser * @param httpResponse Object representing what we are intercepting * as well as what should be returned. * @param callback callback called when we intercept requests. 
*/ async onIntercept(connection, httpResponse, callback) { this._wsConnection.on('message', (message) => { const params = JSON.parse(message) if (params.method === 'Fetch.requestPaused') { const requestPausedParams = params['params'] if (requestPausedParams.request.url == httpResponse.urlToIntercept) { connection.execute( 'Fetch.continueRequest', this.getRandomNumber(1, 10), { requestId: requestPausedParams['requestId'], url: httpResponse.urlToIntercept, method: httpResponse.method, headers: httpResponse.headers, postData: httpResponse.body } ) callback() } else { connection.execute( 'Fetch.continueRequest', this.getRandomNumber(1, 10), { requestId: requestPausedParams['requestId'], } ) } } }) await connection.execute( 'Fetch.enable', 1, {}, null ) await connection.execute( 'Network.setCacheDisabled', this.getRandomNumber(1, 10), { cacheDisabled: true, }, null ) } /** * * @param connection * @param callback * @returns {Promise<void>} */ async onLogEvent(connection, callback) { this._wsConnection.on('message', (message) => { const params = JSON.parse(message) if (params.method === 'Runtime.consoleAPICalled') { const consoleEventParams = params['params'] let event = { type: consoleEventParams['type'], timestamp: new Date(consoleEventParams['timestamp']), args: consoleEventParams['args'], } callback(event) } }) await connection.execute( 'Runtime.enable', this.getRandomNumber(1, 10), {}, null ) } /** * * @param connection * @param callback * @returns {Promise<void>} */ async onLogException(connection, callback) { await connection.execute( 'Runtime.enable', this.getRandomNumber(1, 10), {}, null ) this._wsConnection.on('message', (message) => { const params = JSON.parse(message) if (params.method === 'Runtime.exceptionThrown') { const exceptionEventParams = params['params'] let event = { exceptionDetails: exceptionEventParams['exceptionDetails'], timestamp: new Date(exceptionEventParams['timestamp']), } callback(event) } }) } /** * @param connection * @param callback * @returns {Promise<void>} */ async logMutationEvents(connection, callback) { await connection.execute( 'Runtime.enable', this.getRandomNumber(1, 10), {}, null ) await connection.execute( 'Page.enable', this.getRandomNumber(1, 10), {}, null ) await connection.execute( 'Runtime.addBinding', this.getRandomNumber(1, 10), { name: '__webdriver_attribute', }, null ) let mutationListener = '' try { // Depending on what is running the code it could appear in 2 different places which is why we try // here and then the other location mutationListener = fs .readFileSync( './javascript/node/selenium-webdriver/lib/atoms/mutation-listener.js', 'utf-8' ) .toString() } catch { mutationListener = fs .readFileSync( path.resolve(__dirname, './atoms/mutation-listener.js'), 'utf-8' ) .toString() } this.executeScript(mutationListener) await connection.execute( 'Page.addScriptToEvaluateOnNewDocument', this.getRandomNumber(1, 10), { source: mutationListener, }, null ) this._wsConnection.on('message', async (message) => { const params = JSON.parse(message) if (params.method === 'Runtime.bindingCalled') { let payload = JSON.parse(params['params']['payload']) let elements = await this.findElements({ css: '*[data-__webdriver_id=' + payload['target'], }) if (elements.length === 0) { return } let event = { element: elements[0], attribute_name: payload['name'], current_value: payload['value'], old_value: payload['oldValue'], } callback(event) } }) } getRandomNumber(min, max) { return Math.floor(Math.random() * (max - min + 1) + min) } } /** * Interface for 
navigating back and forth in the browser history. * * This class should never be instantiated directly. Instead, obtain an instance * with * * webdriver.navigate() * * @see WebDriver#navigate() */ class Navigation { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver } /** * Navigates to a new URL. * * @param {string} url The URL to navigate to. * @return {!Promise<void>} A promise that will be resolved when the URL * has been loaded. */ to(url) { return this.driver_.execute( new command.Command(command.Name.GET).setParameter('url', url) ) } /** * Moves backwards in the browser history. * * @return {!Promise<void>} A promise that will be resolved when the * navigation event has completed. */ back() { return this.driver_.execute(new command.Command(command.Name.GO_BACK)) } /** * Moves forwards in the browser history. * * @return {!Promise<void>} A promise that will be resolved when the * navigation event has completed. */ forward() { return this.driver_.execute(new command.Command(command.Name.GO_FORWARD)) } /** * Refreshes the current page. * * @return {!Promise<void>} A promise that will be resolved when the * navigation event has completed. */ refresh() { return this.driver_.execute(new command.Command(command.Name.REFRESH)) } } /** * Provides methods for managing browser and driver state. * * This class should never be instantiated directly. Instead, obtain an instance * with {@linkplain WebDriver#manage() webdriver.manage()}. */ class Options { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver } /** * Adds a cookie. * * __Sample Usage:__ * * // Set a basic cookie. * driver.manage().addCookie({name: 'foo', value: 'bar'}); * * // Set a cookie that expires in 10 minutes. * let expiry = new Date(Date.now() + (10 * 60 * 1000)); * driver.manage().addCookie({name: 'foo', value: 'bar', expiry}); * * // The cookie expiration may also be specified in seconds since epoch. * driver.manage().addCookie({ * name: 'foo', * value: 'bar', * expiry: Math.floor(Date.now() / 1000) * }); * * @param {!Options.Cookie} spec Defines the cookie to add. * @return {!Promise<void>} A promise that will be resolved * when the cookie has been added to the page. * @throws {error.InvalidArgumentError} if any of the cookie parameters are * invalid. * @throws {TypeError} if `spec` is not a cookie object. */ addCookie({ name, value, path, domain, secure, httpOnly, expiry, sameSite }) { // We do not allow '=' or ';' in the name. if (/[;=]/.test(name)) { throw new error.InvalidArgumentError('Invalid cookie name "' + name + '"') } // We do not allow ';' in value. if (/;/.test(value)) { throw new error.InvalidArgumentError( 'Invalid cookie value "' + value + '"' ) } if (typeof expiry === 'number') { expiry = Math.floor(expiry) } else if (expiry instanceof Date) { let date = /** @type {!Date} */ (expiry) expiry = Math.floor(date.getTime() / 1000) } if (sameSite && !['Strict', 'Lax', 'None'].includes(sameSite)) { throw new error.InvalidArgumentError( `Invalid sameSite cookie value '${sameSite}'. 
It should be one of "Lax", "Strict" or "None"` ) } if (sameSite === 'None' && !secure) { throw new error.InvalidArgumentError( 'Invalid cookie configuration: SameSite=None must be Secure' ) } return this.driver_.execute( new command.Command(command.Name.ADD_COOKIE).setParameter('cookie', { name: name, value: value, path: path, domain: domain, secure: !!secure, httpOnly: !!httpOnly, expiry: expiry, sameSite: sameSite, }) ) } /** * Deletes all cookies visible to the current page. * * @return {!Promise<void>} A promise that will be resolved * when all cookies have been deleted. */ deleteAllCookies() { return this.driver_.execute( new command.Command(command.Name.DELETE_ALL_COOKIES) ) } /** * Deletes the cookie with the given name. This command is a no-op if there is * no cookie with the given name visible to the current page. * * @param {string} name The name of the cookie to delete. * @return {!Promise<void>} A promise that will be resolved * when the cookie has been deleted. */ deleteCookie(name) { return this.driver_.execute( new command.Command(command.Name.DELETE_COOKIE).setParameter('name', name) ) } /** * Retrieves all cookies visible to the current page. Each cookie will be * returned as a JSON object as described by the WebDriver wire protocol. * * @return {!Promise<!Array<!Options.Cookie>>} A promise that will be * resolved with the cookies visible to the current browsing context. */ getCookies() { return this.driver_.execute( new command.Command(command.Name.GET_ALL_COOKIES) ) } /** * Retrieves the cookie with the given name. Returns null if there is no such * cookie. The cookie will be returned as a JSON object as described by the * WebDriver wire protocol. * * @param {string} name The name of the cookie to retrieve. * @return {!Promise<?Options.Cookie>} A promise that will be resolved * with the named cookie * @throws {error.NoSuchCookieError} if there is no such cookie. */ async getCookie(name) { try { const cookie = await this.driver_.execute( new command.Command(command.Name.GET_COOKIE).setParameter('name', name) ) return cookie } catch (err) { if ( !(err instanceof error.UnknownCommandError) && !(err instanceof error.UnsupportedOperationError) ) { throw err } const cookies = await this.getCookies() for (let cookie of cookies) { if (cookie && cookie['name'] === name) { return cookie } } return null } } /** * Fetches the timeouts currently configured for the current session. * * @return {!Promise<{script: number, * pageLoad: number, * implicit: number}>} A promise that will be * resolved with the timeouts currently configured for the current * session. * @see #setTimeouts() */ getTimeouts() { return this.driver_.execute(new command.Command(command.Name.GET_TIMEOUT)) } /** * Sets the timeout durations associated with the current session. * * The following timeouts are supported (all timeouts are specified in * milliseconds): * * - `implicit` specifies the maximum amount of time to wait for an element * locator to succeed when {@linkplain WebDriver#findElement locating} * {@linkplain WebDriver#findElements elements} on the page. * Defaults to 0 milliseconds. * * - `pageLoad` specifies the maximum amount of time to wait for a page to * finishing loading. Defaults to 300000 milliseconds. * * - `script` specifies the maximum amount of time to wait for an * {@linkplain WebDriver#executeScript evaluated script} to run. If set to * `null`, the script timeout will be indefinite. * Defaults to 30000 milliseconds. 
* * @param {{script: (number|null|undefined), * pageLoad: (number|null|undefined), * implicit: (number|null|undefined)}} conf * The desired timeout configuration. * @return {!Promise<void>} A promise that will be resolved when the timeouts * have been set. * @throws {!TypeError} if an invalid options object is provided. * @see #getTimeouts() * @see <https://w3c.github.io/webdriver/webdriver-spec.html#dfn-set-timeouts> */ setTimeouts({ script, pageLoad, implicit } = {}) { let cmd = new command.Command(command.Name.SET_TIMEOUT) let valid = false function setParam(key, value) { if (value === null || typeof value === 'number') { valid = true cmd.setParameter(key, value) } else if (typeof value !== 'undefined') { throw TypeError( 'invalid timeouts configuration:' + ` expected "${key}" to be a number, got ${typeof value}` ) } } setParam('implicit', implicit) setParam('pageLoad', pageLoad) setParam('script', script) if (valid) { return this.driver_.execute(cmd).catch(() => { // Fallback to the legacy method. let cmds = [] if (typeof script === 'number') { cmds.push(legacyTimeout(this.driver_, 'script', script)) } if (typeof implicit === 'number') { cmds.push(legacyTimeout(this.driver_, 'implicit', implicit)) } if (typeof pageLoad === 'number') { cmds.push(legacyTimeout(this.driver_, 'page load', pageLoad)) } return Promise.all(cmds) }) } throw TypeError('no timeouts specified') } /** * @return {!Logs} The interface for managing driver logs. */ logs() { return new Logs(this.driver_) } /** * @return {!Window} The interface for managing the current window. */ window() { return new Window(this.driver_) } } /** * @param {!WebDriver} driver * @param {string} type * @param {number} ms * @return {!Promise<void>} */ function legacyTimeout(driver, type, ms) { return driver.execute( new command.Command(command.Name.SET_TIMEOUT) .setParameter('type', type) .setParameter('ms', ms) ) } /** * A record object describing a browser cookie. * * @record */ Options.Cookie = function () { } /** * The name of the cookie. * * @type {string} */ Options.Cookie.prototype.name /** * The cookie value. * * @type {string} */ Options.Cookie.prototype.value /** * The cookie path. Defaults to "/" when adding a cookie. * * @type {(string|undefined)} */ Options.Cookie.prototype.path /** * The domain the cookie is visible to. Defaults to the current browsing * context's document's URL when adding a cookie. * * @type {(string|undefined)} */ Options.Cookie.prototype.domain /** * Whether the cookie is a secure cookie. Defaults to false when adding a new * cookie. * * @type {(boolean|undefined)} */ Options.Cookie.prototype.secure /** * Whether the cookie is an HTTP only cookie. Defaults to false when adding a * new cookie. * * @type {(boolean|undefined)} */ Options.Cookie.prototype.httpOnly /** * When the cookie expires. * * When {@linkplain Options#addCookie() adding a cookie}, this may be specified * as a {@link Date} object, or in _seconds_ since Unix epoch (January 1, 1970). * * The expiry is always returned in seconds since epoch when * {@linkplain Options#getCookies() retrieving cookies} from the browser. * * @type {(!Date|number|undefined)} */ Options.Cookie.prototype.expiry /** * When the cookie applies to a SameSite policy. * * When {@linkplain Options#addCookie() adding a cookie}, this may be specified * as a {@link string} object which is one of 'Lax', 'Strict' or 'None'. * * * @type {(string|undefined)} */ Options.Cookie.prototype.sameSite /** * An interface for managing the current window. 
* * This class should never be instantiated directly. Instead, obtain an instance * with * * webdriver.manage().window() * * @see WebDriver#manage() * @see Options#window() */ class Window { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver } /** * Retrieves a rect describing the current top-level window's size and * position. * * @return {!Promise<{x: number, y: number, width: number, height: number}>} * A promise that will resolve to the window rect of the current window. */ async getRect() { try { return await this.driver_.execute( new command.Command(command.Name.GET_WINDOW_RECT) ) } catch (ex) { if (ex instanceof error.UnknownCommandError) { let { width, height } = await this.driver_.execute( new command.Command(command.Name.GET_WINDOW_SIZE).setParameter( 'windowHandle', 'current' ) ) let { x, y } = await this.driver_.execute( new command.Command(command.Name.GET_WINDOW_POSITION).setParameter( 'windowHandle', 'current' ) ) return { x, y, width, height } } throw ex } } /** * Sets the current top-level window's size and position. You may update just * the size by omitting `x` & `y`, or just the position by omitting * `width` & `height` options. * * @param {{x: (number|undefined), * y: (number|undefined), * width: (number|undefined), * height: (number|undefined)}} options * The desired window size and position. * @return {!Promise<{x: number, y: number, width: number, height: number}>} * A promise that will resolve to the current window's updated window * rect. */ async setRect({ x, y, width, height }) { try { return await this.driver_.execute( new command.Command(command.Name.SET_WINDOW_RECT).setParameters({ x, y, width, height, }) ) } catch (ex) { if (ex instanceof error.UnknownCommandError) { if (typeof x === 'number' && typeof y === 'number') { await this.driver_.execute( new command.Command(command.Name.SET_WINDOW_POSITION) .setParameter('windowHandle', 'current') .setParameter('x', x) .setParameter('y', y) ) } if (typeof width === 'number' && typeof height === 'number') { await this.driver_.execute( new command.Command(command.Name.SET_WINDOW_SIZE) .setParameter('windowHandle', 'current') .setParameter('width', width) .setParameter('height', height) ) } return this.getRect() } throw ex } } /** * Maximizes the current window. The exact behavior of this command is * specific to individual window managers, but typically involves increasing * the window to the maximum available size without going full-screen. * * @return {!Promise<void>} A promise that will be resolved when the command * has completed. */ maximize() { return this.driver_.execute( new command.Command(command.Name.MAXIMIZE_WINDOW).setParameter( 'windowHandle', 'current' ) ) } /** * Minimizes the current window. The exact behavior of this command is * specific to individual window managers, but typically involves hiding * the window in the system tray. * * @return {!Promise<void>} A promise that will be resolved when the command * has completed. */ minimize() { return this.driver_.execute( new command.Command(command.Name.MINIMIZE_WINDOW) ) } /** * Invokes the "full screen" operation on the current window. The exact * behavior of this command is specific to individual window managers, but * this will typically increase the window size to the size of the physical * display and hide the browser chrome. * * @return {!Promise<void>} A promise that will be resolved when the command * has completed. 
* @see <https://fullscreen.spec.whatwg.org/#fullscreen-an-element> */ fullscreen() { return this.driver_.execute( new command.Command(command.Name.FULLSCREEN_WINDOW) ) } } /** * Interface for managing WebDriver log records. * * This class should never be instantiated directly. Instead, obtain an * instance with * * webdriver.manage().logs() * * @see WebDriver#manage() * @see Options#logs() */ class Logs { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver } /** * Fetches available log entries for the given type. * * Note that log buffers are reset after each call, meaning that available * log entries correspond to those entries not yet returned for a given log * type. In practice, this means that this call will return the available log * entries since the last call, or from the start of the session. * * @param {!logging.Type} type The desired log type. * @return {!Promise<!Array.<!logging.Entry>>} A * promise that will resolve to a list of log entries for the specified * type. */ get(type) { let cmd = new command.Command(command.Name.GET_LOG).setParameter( 'type', type ) return this.driver_.execute(cmd).then(function (entries) { return entries.map(function (entry) { if (!(entry instanceof logging.Entry)) { return new logging.Entry( entry['level'], entry['message'], entry['timestamp'], entry['type'] ) } return entry }) }) } /** * Retrieves the log types available to this driver. * @return {!Promise<!Array<!logging.Type>>} A * promise that will resolve to a list of available log types. */ getAvailableLogTypes() { return this.driver_.execute( new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES) ) } } /** * An interface for changing the focus of the driver to another frame or window. * * This class should never be instantiated directly. Instead, obtain an * instance with * * webdriver.switchTo() * * @see WebDriver#switchTo() */ class TargetLocator { /** * @param {!WebDriver} driver The parent driver. * @private */ constructor(driver) { /** @private {!WebDriver} */ this.driver_ = driver } /** * Locates the DOM element on the current page that corresponds to * `document.activeElement` or `document.body` if the active element is not * available. * * @return {!WebElementPromise} The active element. */ activeElement() { var id = this.driver_.execute( new command.Command(command.Name.GET_ACTIVE_ELEMENT) ) return new WebElementPromise(this.driver_, id) } /** * Switches focus of all future commands to the topmost frame in the current * window. * * @return {!Promise<void>} A promise that will be resolved * when the driver has changed focus to the default content. */ defaultContent() { return this.driver_.execute( new command.Command(command.Name.SWITCH_TO_FRAME).setParameter('id', null) ) } /** * Changes the focus of all future commands to another frame on the page. The * target frame may be specified as one of the following: * * - A number that specifies a (zero-based) index into [window.frames]( * https://developer.mozilla.org/en-US/docs/Web/API/Window.frames). * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe` * DOM element. * - The `null` value, to select the topmost frame on the page. Passing `null` * is the same as calling {@link #defaultContent defaultContent()}. * * If the specified frame cannot be found, the returned promise will be * rejected with a {@linkplain error.NoSuchFrameError}. * * @param {(number|WebElement|null)} id The frame locator. 
* @return {!Promise<void>} A promise that will be resolved * when the driver has changed focus to the specified frame. */ frame(id) { return this.driver_.execute( new command.Command(command.Name.SWITCH_TO_FRAME).setParameter('id', id) ) } /** * Changes the focus of all future commands to the parent frame of the * currently selected frame. This command has no effect if the driver is * already focused on the top-level browsing context. * * @return {!Promise<void>} A promise that will be resolved when the command * has completed. */ parentFrame() { return this.driver_.execute( new command.Command(command.Name.SWITCH_TO_FRAME_PARENT) ) } /** * Changes the focus of all future commands to another window. Windows may be * specified by their {@code window.name} attribute or by its handle * (as returned by {@link WebDriver#getWindowHandles}). * * If the specified window cannot be found, the returned promise will be * rejected with a {@linkplain error.NoSuchWindowError}. * * @param {string} nameOrHandle The name or window handle of the window to * switch focus to. * @return {!Promise<void>} A promise that will be resolved * when the driver has changed focus to the specified window. */ window(nameOrHandle) { return this.driver_.execute( new command.Command(command.Name.SWITCH_TO_WINDOW) // "name" supports the legacy drivers. "handle" is the W3C // compliant parameter. .setParameter('name', nameOrHandle) .setParameter('handle', nameOrHandle) ) } /** * Creates a new browser window and switches the focus for future * commands of this driver to the new window. * * @param {string} typeHint 'window' or 'tab'. The created window is not * guaranteed to be of the requested type; if the driver does not support * the requested type, a new browser window will be created of whatever type * the driver does support. * @return {!Promise<void>} A promise that will be resolved * when the driver has changed focus to the new window. */ newWindow(typeHint) { var driver = this.driver_ return this.driver_ .execute( new command.Command(command.Name.SWITCH_TO_NEW_WINDOW).setParameter( 'type', typeHint ) ) .then(function (response) { return driver.switchTo().window(response.handle) }) } /** * Changes focus to the active modal dialog, such as those opened by * `window.alert()`, `window.confirm()`, and `window.prompt()`. The returned * promise will be rejected with a * {@linkplain error.NoSuchAlertError} if there are no open alerts. * * @return {!AlertPromise} The open alert. */ alert() { var text = this.driver_.execute( new command.Command(command.Name.GET_ALERT_TEXT) ) var driver = this.driver_ return new AlertPromise( driver, text.then(function (text) { return new Alert(driver, text) }) ) } } ////////////////////////////////////////////////////////////////////////////// // // WebElement // ////////////////////////////////////////////////////////////////////////////// const LEGACY_ELEMENT_ID_KEY = 'ELEMENT' const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf' /** * Represents a DOM element. WebElements can be found by searching from the * document root using a {@link WebDriver} instance, or by searching * under another WebElement: * * driver.get('http://www.google.com'); * var searchForm = driver.findElement(By.tagName('form')); * var searchBox = searchForm.findElement(By.name('q')); * searchBox.sendKeys('webdriver'); */ class WebElement { /** * @param {!WebDriver} driver the parent WebDriver instance for this element. 
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for * the underlying DOM element. */ constructor(driver, id) { /** @private {!WebDriver} */ this.driver_ = driver /** @private {!Promise<string>} */ this.id_ = Promise.resolve(id) } /** * @param {string} id The raw ID. * @param {boolean=} noLegacy Whether to exclude the legacy element key. * @return {!Object} The element ID for use with WebDriver's wire protocol. */ static buildId(id, noLegacy = false) { return noLegacy ? { [ELEMENT_ID_KEY]: id } : { [ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id } } /** * Extracts the encoded WebElement ID from the object. * * @param {?} obj The object to extract the ID from. * @return {string} the extracted ID. * @throws {TypeError} if the object is not a valid encoded ID. */ static extractId(obj) { if (obj && typeof obj === 'object') { if (typeof obj[ELEMENT_ID_KEY] === 'string') { return obj[ELEMENT_ID_KEY] } else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') { return obj[LEGACY_ELEMENT_ID_KEY] } } throw new TypeError('object is not a WebElement ID') } /** * @param {?} obj the object to test. * @return {boolean} whether the object is a valid encoded WebElement ID. */ static isId(obj) { return ( obj && typeof obj === 'object' && (typeof obj[ELEMENT_ID_KEY] === 'string' || typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') ) } /** * Compares two WebElements for equality. * * @param {!WebElement} a A WebElement. * @param {!WebElement} b A WebElement. * @return {!Promise<boolean>} A promise that will be * resolved to whether the two WebElements are equal. */ static async equals(a, b) { if (a === b) { return true } return a.driver_.executeScript('return arguments[0] === arguments[1]', a, b) } /** @return {!WebDriver} The parent driver for this instance. */ getDriver() { return this.driver_ } /** * @return {!Promise<string>} A promise that resolves to * the server-assigned opaque ID assigned to this element. */ getId() { return this.id_ } /** * @return {!Object} Returns the serialized representation of this WebElement. */ [Symbols.serialize]() { return this.getId().then(WebElement.buildId) } /** * Schedules a command that targets this element with the parent WebDriver * instance. Will ensure this element's ID is included in the command * parameters under the "id" key. * * @param {!command.Command} command The command to schedule. * @return {!Promise<T>} A promise that will be resolved with the result. * @template T * @see WebDriver#schedule * @private */ execute_(command) { command.setParameter('id', this) return this.driver_.execute(command) } /** * Schedule a command to find a descendant of this element. If the element * cannot be found, the returned promise will be rejected with a * {@linkplain error.NoSuchElementError NoSuchElementError}. * * The search criteria for an element may be defined using one of the static * factories on the {@link by.By} class, or as a short-hand * {@link ./by.ByHash} object. For example, the following two statements * are equivalent: * * var e1 = element.findElement(By.id('foo')); * var e2 = element.findElement({id:'foo'}); * * You may also provide a custom locator function, which takes as input this * instance and returns a {@link WebElement}, or a promise that will resolve * to a WebElement. If the returned promise resolves to an array of * WebElements, WebDriver will use the first element. 
For example, to find the * first visible link on a page, you could write: * * var link = element.findElement(firstVisibleLink); * * function firstVisibleLink(element) { * var links = element.findElements(By.tagName('a')); * return promise.filter(links, function(link) { * return link.isDisplayed(); * }); * } * * @param {!(by.By|Function)} locator The locator strategy to use when * searching for the element. * @return {!WebElementPromise} A WebElement that can be used to issue * commands against the located element. If the element is not found, the * element will be invalidated and all scheduled commands aborted. */ findElement(locator) { locator = by.checkedLocator(locator) let id if (typeof locator === 'function') { id = this.driver_.findElementInternal_(locator, this) } else { let cmd = new command.Command(command.Name.FIND_CHILD_ELEMENT) .setParameter('using', locator.using) .setParameter('value', locator.value) id = this.execute_(cmd) } return new WebElementPromise(this.driver_, id) } /** * Locates all of the descendants of this element that match the given search * criteria. * * @param {!(by.By|Function)} locator The locator strategy to use when * searching for the element. * @return {!Promise<!Array<!WebElement>>} A promise that will resolve to an * array of WebElements. */ async findElements(locator) { locator = by.checkedLocator(locator) if (typeof locator === 'function') { return this.driver_.findElementsInternal_(locator, this) } else { let cmd = new command.Command(command.Name.FIND_CHILD_ELEMENTS) .setParameter('using', locator.using) .setParameter('value', locator.value) let result = await this.execute_(cmd) return Array.isArray(result) ? result : [] } } /** * Clicks on this element. * * @return {!Promise<void>} A promise that will be resolved when the click * command has completed. */ click() { return this.execute_(new command.Command(command.Name.CLICK_ELEMENT)) } /** * Types a key sequence on the DOM element represented by this instance. * * Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is * processed in the key sequence, that key state is toggled until one of the * following occurs: * * - The modifier key is encountered again in the sequence. At this point the * state of the key is toggled (along with the appropriate keyup/down * events). * - The {@link input.Key.NULL} key is encountered in the sequence. When * this key is encountered, all modifier keys current in the down state are * released (with accompanying keyup events). The NULL key can be used to * simulate common keyboard shortcuts: * * element.sendKeys("text was", * Key.CONTROL, "a", Key.NULL, * "now text is"); * // Alternatively: * element.sendKeys("text was", * Key.chord(Key.CONTROL, "a"), * "now text is"); * * - The end of the key sequence is encountered. When there are no more keys * to type, all depressed modifier keys are released (with accompanying * keyup events). * * If this element is a file input ({@code <input type="file">}), the * specified key sequence should specify the path to the file to attach to * the element. This is analogous to the user clicking "Browse..." and entering * the path into the file select dialog. * * var form = driver.findElement(By.css('form')); * var element = form.findElement(By.css('input[type=file]')); * element.sendKeys('/path/to/file.txt'); * form.submit(); * * For uploads to function correctly, the entered path must reference a file * on the _browser's_ machine, not the local machine running this script. 
When * running against a remote Selenium server, a {@link input.FileDetector} * may be used to transparently copy files to the remote machine before * attempting to upload them in the browser. * * __Note:__ On browsers where native keyboard events are not supported * (e.g. Firefox on OS X), key events will be synthesized. Special * punctuation keys will be synthesized according to a standard QWERTY en-us * keyboard layout. * * @param {...(number|string|!IThenable<(number|string)>)} args The * sequence of keys to type. Number keys may be referenced numerically or * by string (1 or '1'). All arguments will be joined into a single * sequence. * @return {!Promise<void>} A promise that will be resolved when all keys * have been typed. */ async sendKeys(...args) { let keys = [] ; (await Promise.all(args)).forEach((key) => { let type = typeof key if (type === 'number') { key = String(key) } else if (type !== 'string') { throw TypeError('each key must be a number or string; got ' + type) } // The W3C protocol requires keys to be specified as an array where // each element is a single key. keys.push(...key.split('')) }) if (!this.driver_.fileDetector_) { return this.execute_( new command.Command(command.Name.SEND_KEYS_TO_ELEMENT) .setParameter('text', keys.join('')) .setParameter('value', keys) ) } keys = await this.driver_.fileDetector_.handleFile( this.driver_, keys.join('') ) return this.execute_( new command.Command(command.Name.SEND_KEYS_TO_ELEMENT) .setParameter('text', keys) .setParameter('value', keys.split('')) ) } /** * Retrieves the element's tag name. * * @return {!Promise<string>} A promise that will be resolved with the * element's tag name. */ getTagName() { return this.execute_(new command.Command(command.Name.GET_ELEMENT_TAG_NAME)) } /** * Retrieves the value of a computed style property for this instance. If * the element inherits the named style from its parent, the parent will be * queried for its value. Where possible, color values will be converted to * their hex representation (e.g. #00ff00 instead of rgb(0, 255, 0)). * * _Warning:_ the value returned will be as the browser interprets it, so * it may be tricky to form a proper assertion. * * @param {string} cssStyleProperty The name of the CSS style property to look * up. * @return {!Promise<string>} A promise that will be resolved with the * requested CSS value. */ getCssValue(cssStyleProperty) { var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY return this.execute_( new command.Command(name).setParameter('propertyName', cssStyleProperty) ) } /** * Retrieves the current value of the given attribute of this element. * Will return the current value, even if it has been modified after the page * has been loaded. More exactly, this method will return the value * of the given attribute, unless that attribute is not present, in which case * the value of the property with the same name is returned. If neither value * is set, null is returned (for example, the "value" property of a textarea * element). The "style" attribute is converted as best can be to a * text representation with a trailing semi-colon. 
The following are deemed to * be "boolean" attributes and will return either "true" or null: * * async, autofocus, autoplay, checked, compact, complete, controls, declare, * defaultchecked, defaultselected, defer, disabled, draggable, ended, * formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope, * loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open, * paused, pubdate, readonly, required, reversed, scoped, seamless, seeking, * selected, spellcheck, truespeed, willvalidate * * Finally, the following commonly mis-capitalized attribute/property names * are evaluated as expected: * * - "class" * - "readonly" * * @param {string} attributeName The name of the attribute to query. * @return {!Promise<?string>} A promise that will be * resolved with the attribute's value. The returned value will always be * either a string or null. */ getAttribute(attributeName) { return this.execute_( new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).setParameter( 'name', attributeName ) ) } /** * Get the given property of the referenced web element. * @param {string} propertyName The name of the attribute to query. * @return {!Promise<string>} A promise that will be * resolved with the element's property value. */ getProperty(propertyName) { return this.execute_( new command.Command(command.Name.GET_ELEMENT_PROPERTY).setParameter( 'name', propertyName ) ) } /** * Get the visible (i.e. not hidden by CSS) innerText of this element, * including sub-elements, without any leading or trailing whitespace. * * @return {!Promise<string>} A promise that will be * resolved with the element's visible text. */ getText() { return this.execute_(new command.Command(command.Name.GET_ELEMENT_TEXT)) } /** * Get the computed WAI-ARIA role of the element. * * @return {!Promise<string>} A promise that will be * resolved with the element's computed role. */ getAriaRole() { return this.execute_(new command.Command(command.Name.GET_COMPUTED_ROLE)) } /** * Get the computed WAI-ARIA label of the element. * * @return {!Promise<string>} A promise that will be * resolved with the element's computed label. */ getAccessibleName() { return this.execute_(new command.Command(command.Name.GET_COMPUTED_LABEL)) } /** * Returns an object describing an element's location, in pixels relative to * the document element, and the element's size in pixels. * * @return {!Promise<{width: number, height: number, x: number, y: number}>} * A promise that will resolve with the element's rect. */ async getRect() { try { return await this.execute_( new command.Command(command.Name.GET_ELEMENT_RECT) ) } catch (err) { if (err instanceof error.UnknownCommandError) { const { width, height } = await this.execute_( new command.Command(command.Name.GET_ELEMENT_SIZE) ) const { x, y } = await this.execute_( new command.Command(command.Name.GET_ELEMENT_LOCATION) ) return { x, y, width, height } } throw err } } /** * Tests whether this element is enabled, as dictated by the `disabled` * attribute. * * @return {!Promise<boolean>} A promise that will be * resolved with whether this element is currently enabled. */ isEnabled() { return this.execute_(new command.Command(command.Name.IS_ELEMENT_ENABLED)) } /** * Tests whether this element is selected. * * @return {!Promise<boolean>} A promise that will be * resolved with whether this element is currently selected. 
*/ isSelected() { return this.execute_(new command.Command(command.Name.IS_ELEMENT_SELECTED)) } /** * Submits the form containing this element (or this element if it is itself * a FORM element). This command is a no-op if the element is not contained in * a form. * * @return {!Promise<void>} A promise that will be resolved * when the form has been submitted. */ submit() { return this.execute_(new command.Command(command.Name.SUBMIT_ELEMENT)) } /** * Clear the `value` of this element. This command has no effect if the * underlying DOM element is neither a text INPUT element nor a TEXTAREA * element. * * @return {!Promise<void>} A promise that will be resolved * when the element has been cleared. */ clear() { return this.execute_(new command.Command(command.Name.CLEAR_ELEMENT)) } /** * Test whether this element is currently displayed. * * @return {!Promise<boolean>} A promise that will be * resolved with whether this element is currently visible on the page. */ isDisplayed() { return this.execute_(new command.Command(command.Name.IS_ELEMENT_DISPLAYED)) } /** * Take a screenshot of the visible region encompassed by this element's * bounding rectangle. * * @return {!Promise<string>} A promise that will be * resolved to the screenshot as a base-64 encoded PNG. */ takeScreenshot() { return this.execute_( new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT) ) } } /** * WebElementPromise is a promise that will be fulfilled with a WebElement. * This serves as a forward proxy on WebElement, allowing calls to be * scheduled directly on this instance before the underlying * WebElement has been fulfilled. In other words, the following two statements * are equivalent: * * driver.findElement({id: 'my-button'}).click(); * driver.findElement({id: 'my-button'}).then(function(el) { * return el.click(); * }); * * @implements {IThenable<!WebElement>} * @final */ class WebElementPromise extends WebElement { /** * @param {!WebDriver} driver The parent WebDriver instance for this * element. * @param {!Promise<!WebElement>} el A promise * that will resolve to the promised element. */ constructor(driver, el) { super(driver, 'unused') /** @override */ this.then = el.then.bind(el) /** @override */ this.catch = el.catch.bind(el) /** * Defers returning the element ID until the wrapped WebElement has been * resolved. * @override */ this.getId = function () { return el.then(function (el) { return el.getId() }) } } } ////////////////////////////////////////////////////////////////////////////// // // Alert // ////////////////////////////////////////////////////////////////////////////// /** * Represents a modal dialog such as {@code alert}, {@code confirm}, or * {@code prompt}. Provides functions to retrieve the message displayed with * the alert, accept or dismiss the alert, and set the response text (in the * case of {@code prompt}). */ class Alert { /** * @param {!WebDriver} driver The driver controlling the browser this alert * is attached to. * @param {string} text The message text displayed with this alert. */ constructor(driver, text) { /** @private {!WebDriver} */ this.driver_ = driver /** @private {!Promise<string>} */ this.text_ = Promise.resolve(text) } /** * Retrieves the message text displayed with this alert. For instance, if the * alert were opened with alert("hello"), then this would return "hello". * * @return {!Promise<string>} A promise that will be * resolved to the text displayed with this alert. */ getText() { return this.text_ } /** * Accepts this alert. 
* * @return {!Promise<void>} A promise that will be resolved * when this command has completed. */ accept() { return this.driver_.execute(new command.Command(command.Name.ACCEPT_ALERT)) } /** * Dismisses this alert. * * @return {!Promise<void>} A promise that will be resolved * when this command has completed. */ dismiss() { return this.driver_.execute(new command.Command(command.Name.DISMISS_ALERT)) } /** * Sets the response text on this alert. This command will return an error if * the underlying alert does not support response text (e.g. window.alert and * window.confirm). * * @param {string} text The text to set. * @return {!Promise<void>} A promise that will be resolved * when this command has completed. */ sendKeys(text) { return this.driver_.execute( new command.Command(command.Name.SET_ALERT_TEXT).setParameter( 'text', text ) ) } } /** * AlertPromise is a promise that will be fulfilled with an Alert. This promise * serves as a forward proxy on an Alert, allowing calls to be scheduled * directly on this instance before the underlying Alert has been fulfilled. In * other words, the following two statements are equivalent: * * driver.switchTo().alert().dismiss(); * driver.switchTo().alert().then(function(alert) { * return alert.dismiss(); * }); * * @implements {IThenable<!Alert>} * @final */ class AlertPromise extends Alert { /** * @param {!WebDriver} driver The driver controlling the browser this * alert is attached to. * @param {!Promise<!Alert>} alert A thenable * that will be fulfilled with the promised alert. */ constructor(driver, alert) { super(driver, 'unused') /** @override */ this.then = alert.then.bind(alert) /** @override */ this.catch = alert.catch.bind(alert) /** * Defer returning text until the promised alert has been resolved. * @override */ this.getText = function () { return alert.then(function (alert) { return alert.getText() }) } /** * Defers action until the alert has been located. * @override */ this.accept = function () { return alert.then(function (alert) { return alert.accept() }) } /** * Defers action until the alert has been located. * @override */ this.dismiss = function () { return alert.then(function (alert) { return alert.dismiss() }) } /** * Defers action until the alert has been located. * @override */ this.sendKeys = function (text) { return alert.then(function (alert) { return alert.sendKeys(text) }) } } } // PUBLIC API module.exports = { Alert, AlertPromise, Condition, Logs, Navigation, Options, TargetLocator, IWebDriver, WebDriver, WebElement, WebElementCondition, WebElementPromise, Window, }
1
19,062
If we are returning the `ws` here when passing in `se:cdp`, can we just return it straight, or do we have to make a request to get the `ws` address?
SeleniumHQ-selenium
java
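The review question above turns on whether the remote end already advertises its CDP websocket address: when a session's capabilities carry `se:cdp`, that value is the `ws` address and can be returned straight, and a request is only needed when it is absent. A minimal JavaScript sketch of the two paths (the helper name and the fallback endpoint are illustrative assumptions, not the project's actual implementation):

async function resolveCdpWsUrl(driver) {
  // If the remote end advertised its CDP endpoint via 'se:cdp' in the
  // session capabilities, return it straight -- no extra request needed.
  const caps = await driver.getCapabilities()
  const advertised = caps.get('se:cdp')
  if (advertised) {
    return advertised
  }
  // Otherwise the ws address has to be discovered with a request, e.g. by
  // querying a local DevTools endpoint (port illustrative; `fetch` assumes
  // Node 18+ or a polyfill).
  const res = await fetch('http://localhost:9222/json/version')
  const { webSocketDebuggerUrl } = await res.json()
  return webSocketDebuggerUrl
}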
@@ -20,6 +20,6 @@ * Internal dependencies */ import Data from 'googlesitekit-data'; -import { registerStore } from 'assets/js/googlesitekit/datastore/site'; +import { registerStore } from './googlesitekit/datastore/site'; registerStore( Data );
1
/** * Entrypoint for the "core/site" data store. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Internal dependencies */ import Data from 'googlesitekit-data'; import { registerStore } from 'assets/js/googlesitekit/datastore/site'; registerStore( Data );
1
40,126
I could add a resolver for this (like above), but it seems more sensible to just change the one reference!
google-site-kit-wp
js
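For context on the resolver the review above mentions and rejects: it would map the `assets/js` prefix onto the source tree at build time so the original absolute-style import keeps working. A rough webpack-alias sketch (the config shape is an assumption, not taken from the project's actual build setup):

// webpack.config.js (sketch only)
const path = require( 'path' );

module.exports = {
	resolve: {
		alias: {
			// Lets `import ... from 'assets/js/...'` resolve from the repo root.
			'assets/js': path.resolve( __dirname, 'assets/js' ),
		},
	},
};

Changing the single relative reference, as the patch does, avoids carrying this extra build configuration for one import.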
@@ -44,11 +44,14 @@ public class TimestampMoreRecentThanParent implements DetachedBlockHeaderValidat private boolean validateHeaderSufficientlyAheadOfParent( final long timestamp, final long parentTimestamp) { - if ((timestamp - minimumSecondsSinceParent) < parentTimestamp) { + final long actualTimeSinceParent = timestamp - parentTimestamp; + if (actualTimeSinceParent < minimumSecondsSinceParent) { LOG.trace( - "Invalid block header: timestamp {} is not sufficiently newer than parent timestamp {}", + "Invalid block header: timestamp {} is only {} seconds newer than parent timestamp {}. Minimum {} seconds", timestamp, - parentTimestamp); + actualTimeSinceParent, + parentTimestamp, + minimumSecondsSinceParent); return false; }
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.mainnet.headervalidationrules; import static com.google.common.base.Preconditions.checkArgument; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.mainnet.DetachedBlockHeaderValidationRule; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; /** Responsible for ensuring the timestamp of a block is newer than its parent. */ public class TimestampMoreRecentThanParent implements DetachedBlockHeaderValidationRule { private static final Logger LOG = LogManager.getLogger(); private final long minimumSecondsSinceParent; public TimestampMoreRecentThanParent(final long minimumSecondsSinceParent) { checkArgument(minimumSecondsSinceParent >= 0, "minimumSecondsSinceParent must be positive"); this.minimumSecondsSinceParent = minimumSecondsSinceParent; } @Override public boolean validate(final BlockHeader header, final BlockHeader parent) { return validateTimestamp(header.getTimestamp(), parent.getTimestamp()); } private boolean validateTimestamp(final long timestamp, final long parentTimestamp) { return validateHeaderSufficientlyAheadOfParent(timestamp, parentTimestamp); } private boolean validateHeaderSufficientlyAheadOfParent( final long timestamp, final long parentTimestamp) { if ((timestamp - minimumSecondsSinceParent) < parentTimestamp) { LOG.trace( "Invalid block header: timestamp {} is not sufficiently newer than parent timestamp {}", timestamp, parentTimestamp); return false; } return true; } }
1
23,836
Not sure "actual" adds value; `secondsSinceParent` is probably closer.
hyperledger-besu
java
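The patch above is a readability change, not a behavioral one: `(timestamp - minimumSecondsSinceParent) < parentTimestamp` rearranges to `(timestamp - parentTimestamp) < minimumSecondsSinceParent`, so both forms reject exactly the same headers (ignoring overflow at the extremes of the long range). A quick illustrative equivalence check in JavaScript (not project code):

// Both predicates flag a header whose timestamp is fewer than `min`
// seconds ahead of its parent; only the loggable intermediate differs.
const original = (ts, parent, min) => ts - min < parent
const rewritten = (ts, parent, min) => ts - parent < min

for (const [ts, parent, min] of [
  [100, 99, 1], // exactly min ahead: valid under both
  [100, 100, 1], // same timestamp: invalid under both
  [100, 90, 15], // 10s ahead of a 15s minimum: invalid under both
]) {
  console.assert(original(ts, parent, min) === rewritten(ts, parent, min))
}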
@@ -159,7 +159,7 @@ module Blacklight::SearchHelper # Get the solr response when retrieving only a single facet field # @return [Blacklight::SolrResponse] the solr response def get_facet_field_response(facet_field, user_params = params || {}, extra_controller_params = {}) - query = search_builder.with(user_params).merge(extra_controller_params).merge(solr_facet_params(facet_field, user_params, extra_controller_params)) + query = search_builder.with(user_params).facet(facet_field).merge(extra_controller_params) repository.search(query) end
1
# -*- encoding : utf-8 -*- # SearchHelper is a controller layer mixin. It is in the controller scope: request params, session etc. # # NOTE: Be careful when creating variables here as they may be overriding something that already exists. # The ActionController docs: http://api.rubyonrails.org/classes/ActionController/Base.html # # Override these methods in your own controller for customizations: # # class CatalogController < ActionController::Base # # include Blacklight::Catalog # # def repository_class # MyAlternativeRepo # end # end # # Or by including in local extensions: # module LocalSearchHelperExtension # [ local overrides ] # end # # class CatalogController < ActionController::Base # # include Blacklight::Catalog # include LocalSearchHelperExtension # # def repository_class # MyAlternativeRepo # end # end # # Or by using ActiveSupport::Concern: # # module LocalSearchHelperExtension # extend ActiveSupport::Concern # include Blacklight::SearchHelper # # [ local overrides ] # end # # class CatalogController < ApplicationController # include LocalSearchHelperExtension # include Blacklight::Catalog # end module Blacklight::SearchHelper extend ActiveSupport::Concern extend Deprecation self.deprecation_horizon = 'blacklight 6.0' include Blacklight::RequestBuilders ## # Execute a solr query # @see [Blacklight::SolrRepository#send_and_receive] # @return [Blacklight::SolrResponse] the solr response object def find *args request_params = args.extract_options! path = args.first || blacklight_config.solr_path request_params[:qt] ||= blacklight_config.qt repository.send_and_receive path, request_params end deprecation_deprecate :find # returns a params hash for finding a single solr document (CatalogController #show action) def solr_doc_params(id=nil) default_solr_doc_params(id) end deprecation_deprecate :solr_doc_params # a solr query method # given a user query, return a solr response containing both result docs and facets # - mixes in the Blacklight::Solr::SpellingSuggestions module # - the response will have a spelling_suggestions method # Returns a two-element array (aka duple) with first the solr response object, # and second an array of SolrDocuments representing the response.docs def get_search_results(user_params = params || {}, extra_controller_params = {}) query = search_builder.with(user_params).merge(extra_controller_params) response = repository.search(query) case when (response.grouped? && grouped_key_for_results) [response.group(grouped_key_for_results), []] when (response.grouped? && response.grouped.length == 1) [response.grouped.first, []] else [response, response.documents] end end deprecation_deprecate get_search_results: :search_results # a solr query method # @param [Hash,HashWithIndifferentAccess] user_params ({}) the user provided parameters (e.g. query, facets, sort, etc) # @param [Hash,HashWithIndifferentAccess] extra_controller_params ({}) extra parameters to add to the search # @param [List<Symbol>] search_params_logic a list of filter methods to run # @return [Blacklight::SolrResponse] the solr response object def search_results(user_params, search_params_logic) builder = search_builder(search_params_logic).with(user_params) builder.page(user_params[:page]) if user_params[:page] builder.rows(user_params[:per_page] || user_params[:rows]) if user_params[:per_page] or user_params[:rows] response = repository.search(builder) case when (response.grouped? && grouped_key_for_results) [response.group(grouped_key_for_results), []] when (response.grouped?
&& response.grouped.length == 1) [response.grouped.first, []] else [response, response.documents] end end # a solr query method # @param [Hash,HashWithIndifferentAccess] user_params ({}) the user provided parameters (e.g. query, facets, sort, etc) # @param [Hash,HashWithIndifferentAccess] extra_controller_params ({}) extra parameters to add to the search # @return [Blacklight::SolrResponse] the solr response object def query_solr(user_params = params || {}, extra_controller_params = {}) query = search_builder.with(user_params).merge(extra_controller_params) repository.search(query) end deprecation_deprecate :query_solr # retrieve a document, given the doc id # @return [Blacklight::SolrResponse, Blacklight::SolrDocument] the solr response object and the first document def fetch(id=nil, extra_controller_params={}) if id.is_a? Array fetch_many(id, params, extra_controller_params) else if id.nil? Deprecation.warn Blacklight::SearchHelper, "Calling #fetch without an explicit id argument is deprecated and will be removed in Blacklight 6.0" id ||= params[:id] end fetch_one(id, extra_controller_params) end end alias_method :get_solr_response_for_doc_id, :fetch deprecation_deprecate get_solr_response_for_doc_id: "use fetch(id) instead" # given a field name and array of values, get the matching SOLR documents # @return [Blacklight::SolrResponse, Array<Blacklight::SolrDocument>] the solr response object and a list of solr documents def get_solr_response_for_field_values(field, values, extra_controller_params = {}) query = Deprecation.silence(Blacklight::RequestBuilders) do search_builder.with(params).merge(extra_controller_params).merge(solr_documents_by_field_values_params(field, values)) end solr_response = repository.search(query) [solr_response, solr_response.documents] end deprecation_deprecate :get_solr_response_for_field_values ## # Get the solr response when retrieving only a single facet field # @return [Blacklight::SolrResponse] the solr response def get_facet_field_response(facet_field, user_params = params || {}, extra_controller_params = {}) query = search_builder.with(user_params).merge(extra_controller_params).merge(solr_facet_params(facet_field, user_params, extra_controller_params)) repository.search(query) end # a solr query method # used to paginate through a single facet field's values # /catalog/facet/language_facet def get_facet_pagination(facet_field, user_params=params || {}, extra_controller_params={}) # Make the solr call response = get_facet_field_response(facet_field, user_params, extra_controller_params) limit = response.params[:"f.#{facet_field}.facet.limit"].to_s.to_i - 1 # Actually create the paginator! # NOTE: The sniffing of the proper sort from the solr response is not # currently tested for, tricky to figure out how to test, since the # default setup we test against doesn't use this feature. Blacklight::Solr::FacetPaginator.new(response.aggregations[facet_field].items, :offset => response.params[:"f.#{facet_field}.facet.offset"], :limit => limit, :sort => response.params[:"f.#{facet_field}.facet.sort"] || response.params["facet.sort"] ) end deprecation_deprecate :get_facet_pagination # a solr query method # this is used when selecting a search result: we have a query and a # position in the search results and possibly some facets # Pass in an index where 1 is the first document in the list, and # the Blacklight app-level request params that define the search. 
# @return [Blacklight::SolrDocument, nil] the found document or nil if not found def get_single_doc_via_search(index, request_params) query = search_builder.with(request_params).start(index - 1).rows(1).merge(fl: "*") response = repository.search(query) response.documents.first end deprecation_deprecate :get_single_doc_via_search # Get the previous and next document from a search result # @return [Blacklight::SolrResponse, Array<Blacklight::SolrDocument>] the solr response and a list of the first and last document def get_previous_and_next_documents_for_search(index, request_params, extra_controller_params={}) p = previous_and_next_document_params(index) query = search_builder.with(request_params).start(p.delete(:start)).rows(p.delete(:rows)).merge(extra_controller_params).merge(p) response = repository.search(query) document_list = response.documents # only get the previous doc if there is one prev_doc = document_list.first if index > 0 next_doc = document_list.last if (index + 1) < response.total [response, [prev_doc, next_doc]] end # a solr query method # does a standard search but returns a simplified object. # an array is returned, the first item is the query string, # the second item is another array. This second array contains # all of the field values for each of the documents... # where the field is the "field" argument passed in. def get_opensearch_response(field=nil, request_params = params || {}, extra_controller_params={}) field ||= blacklight_config.view_config('opensearch').title_field query = search_builder.with(request_params).merge(solr_opensearch_params(field)).merge(extra_controller_params) response = repository.search(query) [response.params[:q], response.documents.flat_map {|doc| doc[field] }.uniq] end ## # The key to use to retrieve the grouped field to display def grouped_key_for_results blacklight_config.index.group end def repository_class blacklight_config.repository_class end def repository @repository ||= repository_class.new(blacklight_config) end def solr_repository repository end deprecation_deprecate solr_repository: :repository def blacklight_solr repository.connection end deprecation_deprecate blacklight_solr: "use repository.connection instead" private ## # Retrieve a set of documents by id # @overload fetch_many(ids, extra_controller_params) # @overload fetch_many(ids, user_params, extra_controller_params) def fetch_many(ids=[], *args) if args.length == 1 Deprecation.warn(Blacklight::SearchHelper, "fetch_many with 2 arguments is deprecated") user_params = params extra_controller_params = args.first || {} else user_params, extra_controller_params = args user_params ||= params extra_controller_params ||= {} end query = search_builder. with(user_params). where(blacklight_config.document_model.unique_key => ids). merge(extra_controller_params). merge(fl: '*') solr_response = repository.search(query) [solr_response, solr_response.documents] end alias_method :get_solr_response_for_document_ids, :fetch_many deprecation_deprecate get_solr_response_for_document_ids: "use fetch(ids) instead" def fetch_one(id, extra_controller_params) old_solr_doc_params = Deprecation.silence(Blacklight::SearchHelper) do solr_doc_params(id) end if default_solr_doc_params(id) != old_solr_doc_params Deprecation.warn Blacklight::SearchHelper, "The #solr_doc_params method is deprecated. Instead, you should provide a custom SolrRepository implementation for the additional behavior you're offering. 
The current behavior will be removed in Blacklight 6.0" extra_controller_params = extra_controller_params.merge(old_solr_doc_params) end solr_response = repository.find id, extra_controller_params [solr_response, solr_response.documents.first] end ## # @deprecated def default_solr_doc_params(id=nil) id ||= params[:id] # add our document id to the document_unique_id_param query parameter p = blacklight_config.default_document_solr_params.merge({ # this assumes the request handler will map the unique id param # to the unique key field using either solr local params, the # real-time get handler, etc. blacklight_config.document_unique_id_param => id }) p[:qt] ||= blacklight_config.document_solr_request_handler p end end
1
5,983
Line is too long. [94/80]
projectblacklight-blacklight
rb
@@ -585,4 +585,7 @@ return [ 'image_size' => 'Image size:', 'selected_size' => 'Selected:' ], + 'repeater' => [ + 'min_items_error' => 'You cannot delete any item. You should modify the existing.' + ] ];
1
<?php return [ 'auth' => [ 'title' => 'Administration Area' ], 'field' => [ 'invalid_type' => 'Invalid field type used :type.', 'options_method_invalid_model' => "The attribute ':field' does not resolve to a valid model. Try specifying the options method for model class :model explicitly.", 'options_method_not_exists' => "The model class :model must define a method :method() returning options for the ':field' form field." ], 'widget' => [ 'not_registered' => "A widget class name ':name' has not been registered", 'not_bound' => "A widget with class name ':name' has not been bound to the controller" ], 'page' => [ 'untitled' => 'Untitled', 'access_denied' => [ 'label' => 'Access denied', 'help' => "You don't have the required permissions to view this page.", 'cms_link' => 'Return to the back-end' ], 'no_database' => [ 'label' => 'Database missing', 'help' => "A database is required to access the back-end. Check the database is configured and migrated before trying again.", 'cms_link' => 'Return to the homepage' ], 'invalid_token' => [ 'label' => 'Invalid security token' ] ], 'partial' => [ 'not_found_name' => "The partial ':name' is not found." ], 'account' => [ 'signed_in_as' => 'Signed in as :full_name', 'sign_out' => 'Sign out', 'login' => 'Login', 'reset' => 'Reset', 'restore' => 'Restore', 'login_placeholder' => 'login', 'password_placeholder' => 'password', 'remember_me' => 'Stay logged in', 'forgot_password' => 'Forgot your password?', 'enter_email' => 'Enter your email', 'enter_login' => 'Enter your login', 'email_placeholder' => 'email', 'enter_new_password' => 'Enter a new password', 'password_reset' => 'Password Reset', 'restore_success' => 'Message sent to your email address with instructions.', 'restore_error' => "A user could not be found with a login value of ':login'", 'reset_success' => 'Password has been reset. You may now sign in.', 'reset_error' => 'Invalid password reset data supplied. 
Please try again!', 'reset_fail' => 'Unable to reset your password!', 'apply' => 'Apply', 'cancel' => 'Cancel', 'delete' => 'Delete', 'ok' => 'OK' ], 'dashboard' => [ 'menu_label' => 'Dashboard', 'widget_label' => 'Widget', 'widget_width' => 'Width', 'full_width' => 'full width', 'manage_widgets' => 'Manage widgets', 'add_widget' => 'Add widget', 'widget_inspector_title' => 'Widget configuration', 'widget_inspector_description' => 'Configure the report widget', 'widget_columns_label' => 'Width :columns', 'widget_columns_description' => 'The widget width, a number between 1 and 10.', 'widget_columns_error' => 'Please enter the widget width as a number between 1 and 10.', 'columns' => '{1} column|[2,Inf] columns', 'widget_new_row_label' => 'Force new row', 'widget_new_row_description' => 'Put the widget in a new row.', 'widget_title_label' => 'Widget title', 'widget_title_error' => 'The Widget Title is required.', 'reset_layout' => 'Reset layout', 'reset_layout_confirm' => 'Reset layout back to default?', 'reset_layout_success' => 'Layout has been reset', 'make_default' => 'Make default', 'make_default_confirm' => 'Set the current layout as the default?', 'make_default_success' => 'Current layout is now the default', 'collapse_all' => 'Collapse all', 'expand_all' => 'Expand all', 'status' => [ 'widget_title_default' => 'System status', 'update_available' => '{0} updates available!|{1} update available!|[2,Inf] updates available!', 'updates_pending' => 'Pending software updates', 'updates_nil' => 'Software is up to date', 'updates_link' => 'Update', 'warnings_pending' => 'Some issues need attention', 'warnings_nil' => 'No warnings to display', 'warnings_link' => 'View', 'core_build' => 'System build', 'event_log' => 'Event log', 'request_log' => 'Request log', 'app_birthday' => 'Online since', ], 'welcome' => [ 'widget_title_default' => 'Welcome', 'welcome_back_name' => 'Welcome back to :app, :name.', 'welcome_to_name' => 'Welcome to :app, :name.', 'first_sign_in' => 'This is the first time you have signed in.', 'last_sign_in' => 'Your last sign in was', 'view_access_logs' => 'View access logs', 'nice_message' => 'Have a great day!', ] ], 'user' => [ 'name' => 'Administrator', 'menu_label' => 'Administrators', 'menu_description' => 'Manage back-end administrator users, groups and permissions.', 'list_title' => 'Manage Administrators', 'new' => 'New Administrator', 'login' => 'Login', 'first_name' => 'First Name', 'last_name' => 'Last Name', 'full_name' => 'Full Name', 'email' => 'Email', 'role_field' => 'Role', 'role_comment' => 'Roles define user permissions, which can be overridden on the user level, on the Permissions tab.', 'groups' => 'Groups', 'groups_comment' => 'Specify which groups this account should belong to.', 'avatar' => 'Avatar', 'password' => 'Password', 'password_confirmation' => 'Confirm Password', 'permissions' => 'Permissions', 'account' => 'Account', 'superuser' => 'Super User', 'superuser_comment' => 'Grants this account unlimited access to all areas of the system. Super users can add and manage other users. 
', 'send_invite' => 'Send invitation by email', 'send_invite_comment' => 'Sends a welcome message containing login and password information.', 'delete_confirm' => 'Delete this administrator?', 'return' => 'Return to admin list', 'allow' => 'Allow', 'inherit' => 'Inherit', 'deny' => 'Deny', 'activated' => 'Activated', 'last_login' => 'Last login', 'created_at' => 'Created at', 'updated_at' => 'Updated at', 'group' => [ 'name' => 'Group', 'name_field' => 'Name', 'name_comment' => 'The name is displayed in the group list on the Administrator form.', 'description_field' => 'Description', 'is_new_user_default_field_label' => 'Default group', 'is_new_user_default_field_comment' => 'Add new administrators to this group by default', 'code_field' => 'Code', 'code_comment' => 'Enter a unique code if you want to access the group object with the API.', 'menu_label' => 'Manage Groups', 'list_title' => 'Manage Groups', 'new' => 'New Group', 'delete_confirm' => 'Delete this administrator group?', 'return' => 'Return to group list', 'users_count' => 'Users' ], 'role' => [ 'name' => 'Role', 'name_field' => 'Name', 'name_comment' => 'The name is displayed in the role list on the Administrator form.', 'description_field' => 'Description', 'code_field' => 'Code', 'code_comment' => 'Enter a unique code if you want to access the role object with the API.', 'menu_label' => 'Manage Roles', 'list_title' => 'Manage Roles', 'new' => 'New Role', 'delete_confirm' => 'Delete this administrator role?', 'return' => 'Return to role list', 'users_count' => 'Users' ], 'preferences' => [ 'not_authenticated' => 'There is no authenticated user to load or save preferences for.' ] ], 'list' => [ 'default_title' => 'List', 'search_prompt' => 'Search...', 'no_records' => 'There are no records in this view.', 'missing_model' => 'List behavior used in :class does not have a model defined.', 'missing_column' => 'There are no column definitions for :columns.', 'missing_columns' => 'List used in :class has no list columns defined.', 'missing_definition' => "List behavior does not contain a column for ':field'.", 'missing_parent_definition' => "List behavior does not contain a definition for ':definition'.", 'behavior_not_ready' => 'List behavior has not been initialized, check that you have called makeLists() in your controller.', 'invalid_column_datetime' => "Column value ':column' is not a DateTime object, are you missing a \$dates reference in the Model?", 'pagination' => 'Displayed records: :from-:to of :total', 'first_page' => 'First page', 'last_page' => 'Last page', 'prev_page' => 'Previous page', 'next_page' => 'Next page', 'refresh' => 'Refresh', 'updating' => 'Updating...', 'loading' => 'Loading...', 'setup_title' => 'List setup', 'setup_help' => 'Use checkboxes to select columns you want to see in the list. You can change the position of columns by dragging them up or down.', 'records_per_page' => 'Records per page', 'records_per_page_help' => 'Select the number of records per page to display. 
Please note that high number of records on a single page can reduce performance.', 'check' => 'Check', 'delete_selected' => 'Delete selected', 'delete_selected_empty' => 'There are no selected records to delete.', 'delete_selected_confirm' => 'Delete the selected records?', 'delete_selected_success' => 'Deleted selected records.', 'column_switch_true' => 'Yes', 'column_switch_false' => 'No' ], 'fileupload' => [ 'attachment' => 'Attachment', 'help' => 'Add a title and description for this attachment.', 'title_label' => 'Title', 'description_label' => 'Description', 'default_prompt' => 'Click the %s or drag a file here to upload', 'attachment_url' => 'Attachment URL', 'upload_file' => 'Upload file', 'upload_error' => 'Upload error', 'remove_confirm' => 'Are you sure?', 'remove_file' => 'Remove file' ], 'form' => [ 'create_title' => 'New :name', 'update_title' => 'Edit :name', 'preview_title' => 'Preview :name', 'create_success' => ':name created', 'update_success' => ':name updated', 'delete_success' => ':name deleted', 'reset_success' => 'Reset complete', 'missing_id' => 'Form record ID has not been specified.', 'missing_model' => 'Form behavior used in :class does not have a model defined.', 'missing_definition' => "Form behavior does not contain a field for ':field'.", 'not_found' => 'Form record with an ID of :id could not be found.', 'action_confirm' => 'Are you sure?', 'create' => 'Create', 'create_and_close' => 'Create and close', 'creating' => 'Creating...', 'creating_name' => 'Creating :name...', 'save' => 'Save', 'save_and_close' => 'Save and close', 'saving' => 'Saving...', 'saving_name' => 'Saving :name...', 'delete' => 'Delete', 'deleting' => 'Deleting...', 'confirm_delete' => 'Delete record?', 'confirm_delete_multiple' => 'Delete selected records?', 'deleting_name' => 'Deleting :name...', 'reset_default' => 'Reset to default', 'resetting' => 'Resetting', 'resetting_name' => 'Resetting :name', 'undefined_tab' => 'Misc', 'field_off' => 'Off', 'field_on' => 'On', 'add' => 'Add', 'apply' => 'Apply', 'cancel' => 'Cancel', 'close' => 'Close', 'confirm' => 'Confirm', 'reload' => 'Reload', 'complete' => 'Complete', 'ok' => 'OK', 'or' => 'or', 'confirm_tab_close' => 'Close the tab? Unsaved changes will be lost.', 'behavior_not_ready' => 'Form behavior has not been initialized, check that you have called initForm() in your controller.', 'preview_no_files_message' => 'There are no files uploaded.', 'preview_no_media_message' => 'There is no media selected.', 'preview_no_record_message' => 'There is no record selected.', 'select' => 'Select', 'select_all' => 'all', 'select_none' => 'none', 'select_placeholder' => 'please select', 'insert_row' => 'Insert Row', 'insert_row_below' => 'Insert Row Below', 'delete_row' => 'Delete Row', 'concurrency_file_changed_title' => 'File was changed', 'concurrency_file_changed_description' => "The file you're editing has been changed on disk by another user. You can either reload the file and lose your changes or override the file on the disk.", 'return_to_list' => 'Return to the list' ], 'recordfinder' => [ 'find_record' => 'Find Record', 'cancel' => 'Cancel', ], 'pagelist' => [ 'page_link' => 'Page link', 'select_page' => 'Select a page...' 
], 'relation' => [ 'missing_config' => "Relation behavior does not have any configuration for ':config'.", 'missing_definition' => "Relation behavior does not contain a definition for ':field'.", 'missing_model' => 'Relation behavior used in :class does not have a model defined.', 'invalid_action_single' => 'This action cannot be performed on a singular relationship.', 'invalid_action_multi' => 'This action cannot be performed on a multiple relationship.', 'help' => 'Click on an item to add', 'related_data' => 'Related :name data', 'add' => 'Add', 'add_selected' => 'Add selected', 'add_a_new' => 'Add a new :name', 'link_selected' => 'Link selected', 'link_a_new' => 'Link a new :name', 'cancel' => 'Cancel', 'close' => 'Close', 'add_name' => 'Add :name', 'create' => 'Create', 'create_name' => 'Create :name', 'update' => 'Update', 'update_name' => 'Update :name', 'preview' => 'Preview', 'preview_name' => 'Preview :name', 'remove' => 'Remove', 'remove_name' => 'Remove :name', 'delete' => 'Delete', 'delete_name' => 'Delete :name', 'delete_confirm' => 'Are you sure?', 'link' => 'Link', 'link_name' => 'Link :name', 'unlink' => 'Unlink', 'unlink_name' => 'Unlink :name', 'unlink_confirm' => 'Are you sure?' ], 'reorder' => [ 'default_title' => 'Reorder records', 'no_records' => 'There are no records available to sort.' ], 'model' => [ 'name' => 'Model', 'not_found' => "Model ':class' with an ID of :id could not be found", 'missing_id' => 'There is no ID specified for looking up the model record.', 'missing_relation' => "Model ':class' does not contain a definition for ':relation'.", 'missing_method' => "Model ':class' does not contain a method ':method'.", 'invalid_class' => "Model :model used in :class is not valid, it must inherit the \Model class.", 'mass_assignment_failed' => "Mass assignment failed for Model attribute ':attribute'." ], 'warnings' => [ 'tips' => 'System configuration tips', 'tips_description' => 'There are issues you need to pay attention to in order to configure the system properly.', 'permissions' => 'Directory :name or its subdirectories are not writable for PHP. Please set corresponding permissions for the webserver on this directory.', 'extension' => 'The PHP extension :name is not installed. Please install this library and activate the extension.', 'plugin_missing' => 'The plugin :name is a dependency but is not installed. 
Please install this plugin.', ], 'editor' => [ 'menu_label' => 'Editor settings', 'menu_description' => 'Customize the global editor preferences, such as font size and color scheme.', 'font_size' => 'Font size', 'tab_size' => 'Tab size', 'use_hard_tabs' => 'Indent using tabs', 'code_folding' => 'Code folding', 'code_folding_begin' => 'Mark begin', 'code_folding_begin_end' => 'Mark begin and end', 'autocompletion' => 'Autocompletion', 'word_wrap' => 'Word wrap', 'highlight_active_line' => 'Highlight active line', 'auto_closing' => 'Automatically close tags', 'show_invisibles' => 'Show invisible characters', 'show_gutter' => 'Show gutter', 'basic_autocompletion' => 'Basic Autocompletion (Ctrl + Space)', 'live_autocompletion' => 'Live Autocompletion', 'enable_snippets' => 'Enable code snippets (Tab)', 'display_indent_guides' => 'Show indent guides', 'show_print_margin' => 'Show print margin', 'mode_off' => 'Off', 'mode_fluid' => 'Fluid', '40_characters' => '40 Characters', '80_characters' => '80 Characters', 'theme' => 'Color scheme', 'markup_styles' => 'Markup Styles', 'custom_styles' => 'Custom stylesheet', 'custom_styles_comment' => 'Custom styles to include in the HTML editor.', 'markup_classes' => 'Markup Classes', 'paragraph' => 'Paragraph', 'link' => 'Link', 'table' => 'Table', 'table_cell' => 'Table Cell', 'image' => 'Image', 'label' => 'Label', 'class_name' => 'Class name', 'markup_tags' => 'Markup Tags', 'allowed_empty_tags' => 'Allowed empty tags', 'allowed_empty_tags_comment' => 'The list of tags that are not removed when they have no content inside.', 'allowed_tags' => 'Allowed tags', 'allowed_tags_comment' => 'The list of allowed tags.', 'no_wrap' => 'Do not wrap tags', 'no_wrap_comment' => 'The list of tags that should not be wrapped inside block tags.', 'remove_tags' => 'Remove tags', 'remove_tags_comment' => 'The list of tags that are removed together with their content.', 'toolbar_buttons' => 'Toolbar Buttons', 'toolbar_buttons_comment' => 'The Toolbar Buttons to be displayed in the Rich Editor by default. 
[fullscreen, bold, italic, underline, strikeThrough, subscript, superscript, fontFamily, fontSize, |, color, emoticons, inlineStyle, paragraphStyle, |, paragraphFormat, align, formatOL, formatUL, outdent, indent, quote, insertHR, -, insertLink, insertImage, insertVideo, insertAudio, insertFile, insertTable, undo, redo, clearFormatting, selectAll, html]', ], 'tooltips' => [ 'preview_website' => 'Preview the website' ], 'mysettings' => [ 'menu_label' => 'My Settings', 'menu_description' => 'Settings related to your administration account' ], 'myaccount' => [ 'menu_label' => 'My account', 'menu_description' => 'Update your account details such as name, email address and password.', 'menu_keywords' => 'security login' ], 'branding' => [ 'menu_label' => 'Customize back-end', 'menu_description' => 'Customize the administration area such as name, colors and logo.', 'brand' => 'Brand', 'logo' => 'Logo', 'logo_description' => 'Upload a custom logo to use in the back-end.', 'app_name' => 'App Name', 'app_name_description' => 'This name is shown in the title area of the back-end.', 'app_tagline' => 'App Tagline', 'app_tagline_description' => 'This name is shown on the sign in screen for the back-end.', 'colors' => 'Colors', 'primary_color' => 'Primary color', 'secondary_color' => 'Secondary color', 'accent_color' => 'Accent color', 'styles' => 'Styles', 'custom_stylesheet' => 'Custom stylesheet', 'navigation' => 'Navigation', 'menu_mode' => 'Menu style', 'menu_mode_inline' => 'Inline', 'menu_mode_tile' => 'Tiles', 'menu_mode_collapsed' => 'Collapsed' ], 'backend_preferences' => [ 'menu_label' => 'Back-end preferences', 'menu_description' => 'Manage your account preferences such as desired language.', 'region' => 'Region', 'code_editor' => 'Code editor', 'timezone' => 'Timezone', 'timezone_comment' => 'Adjust displayed dates to this timezone.', 'locale' => 'Locale', 'locale_comment' => 'Select your desired locale for language use.' ], 'access_log' => [ 'hint' => 'This log displays a list of successful sign in attempts by administrators. Records are kept for a total of :days days.', 'menu_label' => 'Access log', 'menu_description' => 'View a list of successful back-end user sign ins.', 'created_at' => 'Date & Time', 'login' => 'Login', 'ip_address' => 'IP address', 'first_name' => 'First name', 'last_name' => 'Last name', 'email' => 'Email' ], 'filter' => [ 'all' => 'all', 'options_method_not_exists' => "The model class :model must define a method :method() returning options for the ':filter' filter.", 'date_all' => 'all periods', 'number_all' => 'all numbers', ], 'import_export' => [ 'upload_csv_file' => '1. Upload a CSV file', 'import_file' => 'Import file', 'first_row_contains_titles' => 'First row contains column titles', 'first_row_contains_titles_desc' => 'Leave this checked if the first row in the CSV is used as the column titles.', 'match_columns' => '2. Match the file columns to database fields', 'file_columns' => 'File columns', 'database_fields' => 'Database fields', 'set_import_options' => '3. Set import options', 'export_output_format' => '1. Export output format', 'file_format' => 'File format', 'standard_format' => 'Standard format', 'custom_format' => 'Custom format', 'delimiter_char' => 'Delimiter character', 'enclosure_char' => 'Enclosure character', 'escape_char' => 'Escape character', 'select_columns' => '2. Select columns to export', 'column' => 'Column', 'columns' => 'Columns', 'set_export_options' => '3. 
Set export options', 'show_ignored_columns' => 'Show ignored columns', 'auto_match_columns' => 'Auto match columns', 'created' => 'Created', 'updated' => 'Updated', 'skipped' => 'Skipped', 'warnings' => 'Warnings', 'errors' => 'Errors', 'skipped_rows' => 'Skipped Rows', 'import_progress' => 'Import progress', 'processing' => 'Processing', 'import_error' => 'Import error', 'upload_valid_csv' => 'Please upload a valid CSV file.', 'drop_column_here' => 'Drop column here...', 'ignore_this_column' => 'Ignore this column', 'processing_successful_line1' => 'File export process completed!', 'processing_successful_line2' => 'The browser will now redirect to the file download.', 'export_progress' => 'Export progress', 'export_error' => 'Export error', 'column_preview' => 'Column preview', 'file_not_found_error' => 'File not found', 'empty_error' => 'There was no data supplied to export', 'empty_import_columns_error' => 'Please specify some columns to import.', 'match_some_column_error' => 'Please match some columns first.', 'required_match_column_error' => 'Please specify a match for the required field :label.', 'empty_export_columns_error' => 'Please specify some columns to export.', 'behavior_missing_uselist_error' => 'You must implement the controller behavior ListController with the export "useList" option enabled.', 'missing_model_class_error' => 'Please specify the modelClass property for :type', 'missing_column_id_error' => 'Missing column identifier', 'unknown_column_error' => 'Unknown column', 'encoding_not_supported_error' => 'Source file encoding is not recognized. Please select the custom file format option with the proper encoding to import your file.', 'encoding_format' => 'File encoding', 'encodings' => [ 'utf_8' => 'UTF-8', 'us_ascii' => 'US-ASCII', 'iso_8859_1' => 'ISO-8859-1 (Latin-1, Western European)', 'iso_8859_2' => 'ISO-8859-2 (Latin-2, Central European)', 'iso_8859_3' => 'ISO-8859-3 (Latin-3, South European)', 'iso_8859_4' => 'ISO-8859-4 (Latin-4, North European)', 'iso_8859_5' => 'ISO-8859-5 (Latin, Cyrillic)', 'iso_8859_6' => 'ISO-8859-6 (Latin, Arabic)', 'iso_8859_7' => 'ISO-8859-7 (Latin, Greek)', 'iso_8859_8' => 'ISO-8859-8 (Latin, Hebrew)', 'iso_8859_9' => 'ISO-8859-9 (Latin-5, Turkish)', 'iso_8859_10' => 'ISO-8859-10 (Latin-6, Nordic)', 'iso_8859_11' => 'ISO-8859-11 (Latin, Thai)', 'iso_8859_13' => 'ISO-8859-13 (Latin-7, Baltic Rim)', 'iso_8859_14' => 'ISO-8859-14 (Latin-8, Celtic)', 'iso_8859_15' => 'ISO-8859-15 (Latin-9, Western European revision with euro sign)', 'windows_1251' => 'Windows-1251 (CP1251)', 'windows_1252' => 'Windows-1252 (CP1252)' ] ], 'permissions' => [ 'manage_media' => 'Upload and manage media contents - images, videos, sounds, documents' ], 'mediafinder' => [ 'label' => 'Media Finder', 'default_prompt' => 'Click the %s button to find a media item' ], 'media' => [ 'menu_label' => 'Media', 'upload' => 'Upload', 'move' => 'Move', 'delete' => 'Delete', 'add_folder' => 'Add folder', 'search' => 'Search', 'display' => 'Display', 'filter_everything' => 'Everything', 'filter_images' => 'Images', 'filter_video' => 'Video', 'filter_audio' => 'Audio', 'filter_documents' => 'Documents', 'library' => 'Library', 'size' => 'Size', 'title' => 'Title', 'last_modified' => 'Last modified', 'public_url' => 'URL', 'click_here' => 'Click here', 'thumbnail_error' => 'Error generating thumbnail.', 'return_to_parent' => 'Return to the parent folder', 'return_to_parent_label' => 'Go up ..', 'nothing_selected' => 'Nothing is selected.', 'multiple_selected' => 'Multiple 
items selected.', 'uploading_file_num' => 'Uploading :number file(s)...', 'uploading_complete' => 'Upload complete', 'uploading_error' => 'Upload failed', 'type_blocked' => 'The file type used is blocked for security reasons.', 'order_by' => 'Order by', 'direction' => 'Direction', 'direction_asc' => 'Ascending', 'direction_desc' => 'Descending', 'folder' => 'Folder', 'no_files_found' => 'No files were found matching your request.', 'delete_empty' => 'Please select items to delete.', 'delete_confirm' => 'Delete the selected item(s)?', 'error_renaming_file' => 'Error renaming the item.', 'new_folder_title' => 'New folder', 'folder_name' => 'Folder name', 'error_creating_folder' => 'Error creating folder', 'folder_or_file_exist' => 'A folder or file with the specified name already exists.', 'move_empty' => 'Please select items to move.', 'move_popup_title' => 'Move files or folders', 'move_destination' => 'Destination folder', 'please_select_move_dest' => 'Please select a destination folder.', 'move_dest_src_match' => 'Please select another destination folder.', 'empty_library' => 'It looks a bit empty here. Upload files or create folders to get started.', 'insert' => 'Insert', 'crop_and_insert' => 'Crop & Insert', 'select_single_image' => 'Please select a single image.', 'selection_not_image' => 'The selected item is not an image.', 'restore' => 'Undo all changes', 'resize' => 'Resize...', 'selection_mode_normal' => 'Normal', 'selection_mode_fixed_ratio' => 'Fixed ratio', 'selection_mode_fixed_size' => 'Fixed size', 'height' => 'Height', 'width' => 'Width', 'selection_mode' => 'Selection mode', 'resize_image' => 'Resize image', 'image_size' => 'Image size:', 'selected_size' => 'Selected:' ], ];
1
12,846
The error should be `At least :number items are required`
octobercms-october
php
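The review message above asks for the validation string `At least :number items are required`; `:number` is a Laravel-style placeholder that the framework substitutes with the actual count at render time. A minimal sketch of that substitution, written in Go rather than October's PHP, with an illustrative function name rather than any real framework API:

package main

import (
	"fmt"
	"strings"
)

// renderLangString substitutes Laravel-style :placeholders in a message
// template, mimicking what the framework does for strings like
// "At least :number items are required". Illustrative only.
func renderLangString(template string, params map[string]string) string {
	for key, val := range params {
		template = strings.ReplaceAll(template, ":"+key, val)
	}
	return template
}

func main() {
	fmt.Println(renderLangString("At least :number items are required",
		map[string]string{"number": "3"})) // At least 3 items are required
}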
@@ -172,11 +172,9 @@ func (s *server) Ping(in *pb.Request, stream pb.Simulator_PingServer) error { // message type of 1999 means that it's a dummy message to allow the engine to pass back proposed blocks if in.InternalMsgType != dummyMsgType { msg := CombineMsg(in.InternalMsgType, msgValue) - switch msg.(type) { - case *iproto.ProposePb: - err = s.nodes[in.PlayerID].HandleBlockPropose(msg.(*iproto.ProposePb), done) - case *iproto.EndorsePb: - err = s.nodes[in.PlayerID].HandleEndorse(msg.(*iproto.EndorsePb), done) + switch cMsg := msg.(type) { + case *iproto.ConsensusPb: + err = s.nodes[in.PlayerID].HandleConsensusMsg(cMsg, done) } if err != nil { logger.Error().Err(err).Msg("failed to handle view change")
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package sim import ( "context" "encoding/hex" "flag" "fmt" "net" "os" "runtime/pprof" "strconv" "time" "google.golang.org/grpc" "google.golang.org/grpc/reflection" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/config" pb "github.com/iotexproject/iotex-core/consensus/sim/proto" "github.com/iotexproject/iotex-core/crypto" "github.com/iotexproject/iotex-core/logger" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/proto" ) const ( port = ":50051" dummyMsgType = 1999 ) // server is used to implement message.SimulatorServer. type ( server struct { nodes []Sim // slice of Consensus objects } byzVal struct { val blockchain.Validator } ) var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") // Validate for the byzantine node uses the actual block validator and returns the opposite func (v *byzVal) Validate(blk *blockchain.Block, tipHeight uint64, tipHash hash.Hash32B, containCoinbase bool) error { //err := v.val.Validate(blk, tipHeight, tipHash) //if err != nil { // return nil //} //return errors.New("") return nil } // AddActionValidators adds validators func (v *byzVal) AddActionValidators(validators ...protocol.ActionValidator) {} // AddActionEnvelopeValidators adds validators func (v *byzVal) AddActionEnvelopeValidators(validators ...protocol.ActionEnvelopeValidator) {} // Init implements simulator.SimulatorServer func (s *server) Init(in *pb.InitRequest, stream pb.Simulator_InitServer) error { nPlayers := in.NBF + in.NFS + in.NHonest ctx := context.Background() for i := 0; i < int(nPlayers); i++ { cfg := config.Default // s.nodes = make([]consensus.Sim, in.NPlayers) // allocate all the necessary space now because otherwise nodes will get copied and create pointer issues cfg.Consensus.Scheme = config.RollDPoSScheme cfg.Consensus.RollDPoS.DelegateInterval = time.Millisecond cfg.Consensus.RollDPoS.ProposerInterval = 0 cfg.Consensus.RollDPoS.UnmatchedEventTTL = 1000 * time.Second cfg.Consensus.RollDPoS.RoundStartTTL = 1000 * time.Second cfg.Consensus.RollDPoS.AcceptProposeTTL = 1000 * time.Second cfg.Consensus.RollDPoS.AcceptProposalEndorseTTL = 1000 * time.Second cfg.Consensus.RollDPoS.AcceptCommitEndorseTTL = 1000 * time.Second // handle node address, delegate addresses, etc. 
cfg.Network.Host = "127.0.0.1" cfg.Network.Port = 10000 // create public/private key pair and address pk, sk, err := crypto.EC283.NewKeyPair() if err != nil { logger.Error().Err(err).Msg("failed to create public/private key pair.") } cfg.Chain.ProducerPubKey = keypair.EncodePublicKey(pk) cfg.Chain.ProducerPrivKey = keypair.EncodePrivateKey(sk) // set chain database path cfg.Chain.ChainDBPath = "./chain" + strconv.Itoa(i) + ".db" cfg.Chain.TrieDBPath = "./trie" + strconv.Itoa(i) + ".db" bc := blockchain.NewBlockchain(cfg, blockchain.DefaultStateFactoryOption(), blockchain.BoltDBDaoOption()) if err := bc.Start(ctx); err != nil { logger.Panic().Err(err).Msg("error when starting blockchain") } if i >= int(in.NFS+in.NHonest) { // is byzantine node val := bc.Validator() byzVal := &byzVal{val: val} bc.SetValidator(byzVal) } ap, err := actpool.NewActPool(bc, cfg.ActPool) if err != nil { logger.Fatal().Err(err).Msg("Failed to create actpool") } var node Sim if i < int(in.NHonest) { node = NewSim(cfg, bc, ap) } else if i < int(in.NHonest+in.NFS) { s.nodes = append(s.nodes, nil) continue } else { node = NewSimByzantine(cfg, bc, ap) } s.nodes = append(s.nodes, node) done := make(chan bool) node.SetDoneStream(done) err = node.Start(ctx) if err != nil { logger.Fatal().Err(err).Msg("Failed to start node") } fmt.Printf("Node %d initialized and consensus engine started\n", i) time.Sleep(2 * time.Millisecond) <-done fmt.Printf("Node %d initialization ended\n", i) //s.nodes = append(s.nodes, node) } for i := 0; i < int(in.NFS); i++ { s.nodes = append(s.nodes, nil) } fmt.Printf("Simulator initialized with %d players\n", nPlayers) return nil } // Ping implements simulator.SimulatorServer func (s *server) Ping(in *pb.Request, stream pb.Simulator_PingServer) error { fmt.Println() fmt.Printf("Node %d pinged; opened message stream\n", in.PlayerID) msgValue, err := hex.DecodeString(in.Value) if err != nil { logger.Error().Msg("Could not decode message value into byte array") } done := make(chan bool) s.nodes[in.PlayerID].SetStream(&stream) s.nodes[in.PlayerID].SendUnsent() // message type of 1999 means that it's a dummy message to allow the engine to pass back proposed blocks if in.InternalMsgType != dummyMsgType { msg := CombineMsg(in.InternalMsgType, msgValue) switch msg.(type) { case *iproto.ProposePb: err = s.nodes[in.PlayerID].HandleBlockPropose(msg.(*iproto.ProposePb), done) case *iproto.EndorsePb: err = s.nodes[in.PlayerID].HandleEndorse(msg.(*iproto.EndorsePb), done) } if err != nil { logger.Error().Err(err).Msg("failed to handle view change") } time.Sleep(2 * time.Millisecond) <-done // wait until done } fmt.Println("closed message stream") return nil } func (s *server) Exit(context context.Context, in *pb.Empty) (*pb.Empty, error) { defer os.Exit(0) defer pprof.StopCPUProfile() return &pb.Empty{}, nil } func main() { flag.Parse() if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { logger.Fatal().Err(err).Msg("failed to create file") } err = pprof.StartCPUProfile(f) if err != nil { logger.Fatal().Err(err).Msg("failed to start CPU profile") } } lis, err := net.Listen("tcp", port) if err != nil { logger.Fatal().Err(err).Msg("failed to listen") } s := grpc.NewServer() pb.RegisterSimulatorServer(s, &server{}) // Register reflection service on gRPC server. reflection.Register(s) if err := s.Serve(lis); err != nil { logger.Fatal().Err(err).Msg("failed to serve") } }
1
13,802
singleCaseSwitch: should rewrite switch statement to if statement (from `gocritic`)
iotexproject-iotex-core
go
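The gocritic singleCaseSwitch warning above flags a type switch with only one case; the suggested rewrite is an if statement with a comma-ok type assertion. A minimal sketch of the before and after, using a placeholder message type rather than the actual iotex-core iproto.ConsensusPb:

package main

import "fmt"

// ConsensusMsg stands in for the real protobuf message type; the name
// and field are illustrative only.
type ConsensusMsg struct{ Height uint64 }

func handleConsensus(m *ConsensusMsg) error {
	fmt.Println("handling consensus message at height", m.Height)
	return nil
}

func handle(msg interface{}) error {
	// Before (flagged by singleCaseSwitch):
	//   switch cMsg := msg.(type) {
	//   case *ConsensusMsg:
	//       return handleConsensus(cMsg)
	//   }
	// After: a single-case type switch reads more directly as an if
	// with a comma-ok type assertion.
	if cMsg, ok := msg.(*ConsensusMsg); ok {
		return handleConsensus(cMsg)
	}
	return nil
}

func main() { _ = handle(&ConsensusMsg{Height: 42}) }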
@@ -506,11 +506,7 @@ type encryptionKeyGetter interface { kbfscrypto.TLFCryptKey, error) } -// KeyManager fetches and constructs the keys needed for KBFS file -// operations. -type KeyManager interface { - encryptionKeyGetter - +type mdDecryptionKeyGetter interface { // GetTLFCryptKeyForMDDecryption gets the crypt key to use for the // TLF with the given metadata to decrypt the private portion of // the metadata. It finds the appropriate key from mdWithKeys
1
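The patch above pulls GetTLFCryptKeyForMDDecryption out of the fat KeyManager interface into a narrow mdDecryptionKeyGetter, which KeyManager can then embed. A minimal sketch of that interface-segregation pattern, with simplified placeholder signatures standing in for the real kbfs ones:

package main

// encryptionKeyGetter and mdDecryptionKeyGetter are narrow, single-purpose
// interfaces; callers that only decrypt metadata can depend on the small
// one alone. Signatures are simplified placeholders, not the kbfs ones.
type encryptionKeyGetter interface {
	GetTLFCryptKeyForEncryption() (string, error)
}

type mdDecryptionKeyGetter interface {
	GetTLFCryptKeyForMDDecryption() (string, error)
}

// KeyManager recomposes the narrow interfaces, so existing callers of the
// fat interface keep working after the split.
type KeyManager interface {
	encryptionKeyGetter
	mdDecryptionKeyGetter
}

func main() {}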
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "time" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/kbfscrypto" metrics "github.com/rcrowley/go-metrics" "golang.org/x/net/context" ) // Block just needs to be (de)serialized using msgpack type Block interface { // GetEncodedSize returns the encoded size of this block, but only // if it has been previously set; otherwise it returns 0. GetEncodedSize() uint32 // SetEncodedSize sets the encoded size of this block, locally // caching it. The encoded size is not serialized. SetEncodedSize(size uint32) // DataVersion returns the data version for this block DataVersion() DataVer } // NodeID is a unique but transient ID for a Node. That is, two Node // objects in memory at the same time represent the same file or // directory if and only if their NodeIDs are equal (by pointer). type NodeID interface { // ParentID returns the NodeID of the directory containing the // pointed-to file or directory, or nil if none exists. ParentID() NodeID } // Node represents a direct pointer to a file or directory in KBFS. // It is somewhat like an inode in a regular file system. Users of // KBFS can use Node as a handle when accessing files or directories // they have previously looked up. type Node interface { // GetID returns the ID of this Node. This should be used as a // map key instead of the Node itself. GetID() NodeID // GetFolderBranch returns the folder ID and branch for this Node. GetFolderBranch() FolderBranch // GetBasename returns the current basename of the node, or "" // if the node has been unlinked. GetBasename() string } // KBFSOps handles all file system operations. Expands all indirect // pointers. Operations that modify the server data change all the // block IDs along the path, and so must return a path with the new // BlockIds so the caller can update their references. // // KBFSOps implementations must guarantee goroutine-safety of calls on // a per-top-level-folder basis. // // There are two types of operations that could block: // * remote-sync operations, that need to synchronously update the // MD for the corresponding top-level folder. When these // operations return successfully, they will have guaranteed to // have successfully written the modification to the KBFS servers. // * remote-access operations, that don't sync any modifications to KBFS // servers, but may block on reading data from the servers. // // KBFSOps implementations are supposed to give git-like consistency // semantics for modification operations; they will be visible to // other clients immediately after the remote-sync operations succeed, // if and only if there was no other intervening modification to the // same folder. If not, the change will be sync'd to the server in a // special per-device "unmerged" area before the operation succeeds. // In this case, the modification will not be visible to other clients // until the KBFS code on this device performs automatic conflict // resolution in the background. // // All methods take a Context (see https://blog.golang.org/context), // and if that context is cancelled during the operation, KBFSOps will // abort any blocking calls and return ctx.Err(). 
Any notifications // resulting from an operation will also include this ctx (or a // Context derived from it), allowing the caller to determine whether // the notification is a result of their own action or an external // action. type KBFSOps interface { // GetFavorites returns the logged-in user's list of favorite // top-level folders. This is a remote-access operation. GetFavorites(ctx context.Context) ([]Favorite, error) // RefreshCachedFavorites tells the instances to forget any cached // favorites list and fetch a new list from the server. The // effects are asynchronous; if there's an error refreshing the // favorites, the cached favorites will become empty. RefreshCachedFavorites(ctx context.Context) // AddFavorite adds the favorite to both the server and // the local cache. AddFavorite(ctx context.Context, fav Favorite) error // DeleteFavorite deletes the favorite from both the server and // the local cache. Idempotent, so it succeeds even if the folder // isn't favorited. DeleteFavorite(ctx context.Context, fav Favorite) error // GetTLFCryptKeys gets crypt key of all generations as well as // TLF ID for tlfHandle. The returned keys (the keys slice) are ordered by // generation, starting with the key for FirstValidKeyGen. GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) ( keys []kbfscrypto.TLFCryptKey, id TlfID, err error) // GetTLFID gets the TlfID for tlfHandle. GetTLFID(ctx context.Context, tlfHandle *TlfHandle) (TlfID, error) // GetOrCreateRootNode returns the root node and root entry // info associated with the given TLF handle and branch, if // the logged-in user has read permissions to the top-level // folder. It creates the folder if one doesn't exist yet (and // branch == MasterBranch), and the logged-in user has write // permissions to the top-level folder. This is a // remote-access operation. GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetRootNode is like GetOrCreateRootNode but if the root node // does not exist it will return a nil Node and not create it. GetRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetDirChildren returns a map of children in the directory, // mapped to their EntryInfo, if the logged-in user has read // permission for the top-level folder. This is a remote-access // operation. GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error) // Lookup returns the Node and entry info associated with a // given name in a directory, if the logged-in user has read // permissions to the top-level folder. The returned Node is nil // if the name is a symlink. This is a remote-access operation. Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error) // Stat returns the entry info associated with a // given Node, if the logged-in user has read permissions to the // top-level folder. This is a remote-access operation. Stat(ctx context.Context, node Node) (EntryInfo, error) // CreateDir creates a new subdirectory under the given node, if // the logged-in user has write permission to the top-level // folder. Returns the new Node for the created subdirectory, and // its new entry info. This is a remote-sync operation. CreateDir(ctx context.Context, dir Node, name string) ( Node, EntryInfo, error) // CreateFile creates a new file under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new Node for the created file, and its new // entry info. 
excl (when implemented) specifies whether this is an exclusive // create. Semantically setting excl to WithExcl is like O_CREAT|O_EXCL in a // Unix open() call. // // This is a remote-sync operation. CreateFile(ctx context.Context, dir Node, name string, isExec bool, excl Excl) ( Node, EntryInfo, error) // CreateLink creates a new symlink under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new entry info for the created symlink. This // is a remote-sync operation. CreateLink(ctx context.Context, dir Node, fromName string, toPath string) ( EntryInfo, error) // RemoveDir removes the subdirectory represented by the given // node, if the logged-in user has write permission to the // top-level folder. Will return an error if the subdirectory is // not empty. This is a remote-sync operation. RemoveDir(ctx context.Context, dir Node, dirName string) error // RemoveEntry removes the directory entry represented by the // given node, if the logged-in user has write permission to the // top-level folder. This is a remote-sync operation. RemoveEntry(ctx context.Context, dir Node, name string) error // Rename performs an atomic rename operation with a given // top-level folder if the logged-in user has write permission to // that folder, and will return an error if nodes from different // folders are passed in. Also returns an error if the new name // already has an entry corresponding to an existing directory // (only non-dir types may be renamed over). This is a // remote-sync operation. Rename(ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) error // Read fills in the given buffer with data from the file at the // given node starting at the given offset, if the logged-in user // has read permission to the top-level folder. The read data // reflects any outstanding writes and truncates to that file that // have been written through this KBFSOps object, even if those // writes have not yet been sync'd. There is no guarantee that // Read returns all of the requested data; it will return the // number of bytes that it wrote to the dest buffer. Reads on an // unlinked file may or may not succeed, depending on whether or // not the data has been cached locally. If (0, nil) is returned, // that means EOF has been reached. This is a remote-access // operation. Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error) // Write modifies the file at the given node, by writing the given // buffer at the given offset within the file, if the logged-in // user has write permission to the top-level folder. It // overwrites any data already there, and extends the file size as // necessary to accommodate the new data. It guarantees to write // the entire buffer in one operation. Writes on an unlinked file // may or may not succeed as no-ops, depending on whether or not // the necessary blocks have been locally cached. This is a // remote-access operation. Write(ctx context.Context, file Node, data []byte, off int64) error // Truncate modifies the file at the given node, by either // shrinking or extending its size to match the given size, if the // logged-in user has write permission to the top-level folder. // If extending the file, it pads the new data with 0s. Truncates // on an unlinked file may or may not succeed as no-ops, depending // on whether or not the necessary blocks have been locally // cached. This is a remote-access operation. 
Truncate(ctx context.Context, file Node, size uint64) error // SetEx turns on or off the executable bit on the file // represented by a given node, if the logged-in user has write // permissions to the top-level folder. This is a remote-sync // operation. SetEx(ctx context.Context, file Node, ex bool) error // SetMtime sets the modification time on the file represented by // a given node, if the logged-in user has write permissions to // the top-level folder. If mtime is nil, it is a noop. This is // a remote-sync operation. SetMtime(ctx context.Context, file Node, mtime *time.Time) error // Sync flushes all outstanding writes and truncates for the given // file to the KBFS servers, if the logged-in user has write // permissions to the top-level folder. If done through a file // system interface, this may include modifications done via // multiple file handles. This is a remote-sync operation. Sync(ctx context.Context, file Node) error // FolderStatus returns the status of a particular folder/branch, along // with a channel that will be closed when the status has been // updated (to eliminate the need for polling this method). FolderStatus(ctx context.Context, folderBranch FolderBranch) ( FolderBranchStatus, <-chan StatusUpdate, error) // Status returns the status of KBFS, along with a channel that will be // closed when the status has been updated (to eliminate the need for // polling this method). KBFSStatus can be non-empty even if there is an // error. Status(ctx context.Context) ( KBFSStatus, <-chan StatusUpdate, error) // UnstageForTesting clears out this device's staged state, if // any, and fast-forwards to the current head of this // folder-branch. UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error // Rekey rekeys this folder. Rekey(ctx context.Context, id TlfID) error // SyncFromServerForTesting blocks until the local client has // contacted the server and guaranteed that all known updates // for the given top-level folder have been applied locally // (and notifications sent out to any observers). It returns // an error if this folder-branch is currently unmerged or // dirty locally. SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch) error // GetUpdateHistory returns a complete history of all the merged // updates of the given folder, in a data structure that's // suitable for encoding directly into JSON. This is an expensive // operation, and should only be used for occasional debugging. // Note that the history does not include any unmerged changes or // outstanding writes from the local device. GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) ( history TLFUpdateHistory, err error) // GetEditHistory returns a clustered list of the most recent file // edits by each of the valid writers of the given folder. Users // looking to get updates to this list can register as an observer // for the folder. GetEditHistory(ctx context.Context, folderBranch FolderBranch) ( edits TlfWriterEdits, err error) // GetNodeMetadata gets metadata associated with a Node. GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error) // Shutdown is called to clean up any resources associated with // this KBFSOps instance. Shutdown() error // PushConnectionStatusChange updates the status of a service for // human readable connection status tracking. PushConnectionStatusChange(service string, newStatus error) } // KeybaseService is an interface for communicating with the keybase // service. 
type KeybaseService interface { // Resolve, given an assertion, resolves it to a username/UID // pair. The username <-> UID mapping is trusted and // immutable, so it can be cached. If the assertion is just // the username or a UID assertion, then the resolution can // also be trusted. If the returned pair is equal to that of // the current session, then it can also be // trusted. Otherwise, Identify() needs to be called on the // assertion before the assertion -> (username, UID) mapping // can be trusted. Resolve(ctx context.Context, assertion string) ( libkb.NormalizedUsername, keybase1.UID, error) // Identify, given an assertion, returns a UserInfo struct // with the user that matches that assertion, or an error // otherwise. The reason string is displayed on any tracker // popups spawned. Identify(ctx context.Context, assertion, reason string) (UserInfo, error) // LoadUserPlusKeys returns a UserInfo struct for a // user with the specified UID. // If you have the UID for a user and don't require Identify to // validate an assertion or the identity of a user, use this to // get UserInfo structs as it is much cheaper than Identify. LoadUserPlusKeys(ctx context.Context, uid keybase1.UID) (UserInfo, error) // LoadUnverifiedKeys returns a list of unverified public keys. They are the union // of all known public keys associated with the account and the verified // keys currently part of the user's sigchain. LoadUnverifiedKeys(ctx context.Context, uid keybase1.UID) ( []keybase1.PublicKey, error) // CurrentSession returns a SessionInfo struct with all the // information for the current session, or an error otherwise. CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error) // FavoriteAdd adds the given folder to the list of favorites. FavoriteAdd(ctx context.Context, folder keybase1.Folder) error // FavoriteDelete removes the given folder from the list of // favorites. FavoriteDelete(ctx context.Context, folder keybase1.Folder) error // FavoriteList returns the current list of favorites. FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error) // Notify sends a filesystem notification. Notify(ctx context.Context, notification *keybase1.FSNotification) error // NotifySyncStatus sends a sync status notification. NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus) error // FlushUserFromLocalCache instructs this layer to clear any // KBFS-side, locally-cached information about the given user. // This does NOT involve communication with the daemon, this is // just to force future calls loading this user to fall through to // the daemon itself, rather than being served from the cache. FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID) // FlushUserUnverifiedKeysFromLocalCache instructs this layer to clear any // KBFS-side, locally-cached unverified keys for the given user. FlushUserUnverifiedKeysFromLocalCache(ctx context.Context, uid keybase1.UID) // TODO: Add CryptoClient methods, too. // Shutdown frees any resources associated with this // instance. No other methods may be called after this is // called. Shutdown() } // KeybaseServiceCn defines methods needed to construct KeybaseService // and Crypto implementations. 
type KeybaseServiceCn interface { NewKeybaseService(config Config, params InitParams, ctx Context, log logger.Logger) (KeybaseService, error) NewCrypto(config Config, params InitParams, ctx Context, log logger.Logger) (Crypto, error) } type resolver interface { // Resolve, given an assertion, resolves it to a username/UID // pair. The username <-> UID mapping is trusted and // immutable, so it can be cached. If the assertion is just // the username or a UID assertion, then the resolution can // also be trusted. If the returned pair is equal to that of // the current session, then it can also be // trusted. Otherwise, Identify() needs to be called on the // assertion before the assertion -> (username, UID) mapping // can be trusted. Resolve(ctx context.Context, assertion string) ( libkb.NormalizedUsername, keybase1.UID, error) } type identifier interface { // Identify resolves an assertion (which could also be a // username) to a UserInfo struct, spawning tracker popups if // necessary. The reason string is displayed on any tracker // popups spawned. Identify(ctx context.Context, assertion, reason string) (UserInfo, error) } type normalizedUsernameGetter interface { // GetNormalizedUsername returns the normalized username // corresponding to the given UID. GetNormalizedUsername(ctx context.Context, uid keybase1.UID) (libkb.NormalizedUsername, error) } type currentInfoGetter interface { // GetCurrentToken gets the current keybase session token. GetCurrentToken(ctx context.Context) (string, error) // GetCurrentUserInfo gets the name and UID of the current // logged-in user. GetCurrentUserInfo(ctx context.Context) ( libkb.NormalizedUsername, keybase1.UID, error) // GetCurrentCryptPublicKey gets the crypt public key for the // currently-active device. GetCurrentCryptPublicKey(ctx context.Context) ( kbfscrypto.CryptPublicKey, error) // GetCurrentVerifyingKey gets the public key used for signing for the // currently-active device. GetCurrentVerifyingKey(ctx context.Context) ( kbfscrypto.VerifyingKey, error) } // KBPKI interacts with the Keybase daemon to fetch user info. type KBPKI interface { currentInfoGetter resolver identifier normalizedUsernameGetter // HasVerifyingKey returns nil if the given user has the given // VerifyingKey, and an error otherwise. HasVerifyingKey(ctx context.Context, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey, atServerTime time.Time) error // HasUnverifiedVerifyingKey returns nil if the given user has the given // unverified VerifyingKey, and an error otherwise. Note that any match // is with a key not verified to be currently connected to the user via // their sigchain. This is currently only used to verify finalized or // reset TLFs. Further note that unverified keys are a superset of // verified keys. HasUnverifiedVerifyingKey(ctx context.Context, uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) error // GetCryptPublicKeys gets all of a user's crypt public keys (including // paper keys). GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) ( []kbfscrypto.CryptPublicKey, error) // TODO: Split the methods below off into a separate // FavoriteOps interface. // FavoriteAdd adds folder to the list of the logged in user's // favorite folders. It is idempotent. FavoriteAdd(ctx context.Context, folder keybase1.Folder) error // FavoriteDelete deletes folder from the list of the logged in user's // favorite folders. It is idempotent. 
FavoriteDelete(ctx context.Context, folder keybase1.Folder) error // FavoriteList returns the list of all favorite folders for // the logged in user. FavoriteList(ctx context.Context) ([]keybase1.Folder, error) // Notify sends a filesystem notification. Notify(ctx context.Context, notification *keybase1.FSNotification) error } // KeyMetadata is an interface for something that holds key // information. This is usually implemented by RootMetadata. type KeyMetadata interface { // TlfID returns the ID of the TLF for which this object holds // key info. TlfID() TlfID // LatestKeyGeneration returns the most recent key generation // with key data in this object, or PublicKeyGen if this TLF // is public. LatestKeyGeneration() KeyGen // GetTlfHandle returns the handle for the TLF. It must not // return nil. // // TODO: Remove the need for this function in this interface, // so that BareRootMetadata can implement this interface // fully. GetTlfHandle() *TlfHandle // HasKeyForUser returns whether or not the given user has // keys for at least one device at the given key // generation. Returns false if the TLF is public, or if the // given key generation is invalid. HasKeyForUser(keyGen KeyGen, user keybase1.UID) bool // GetTLFCryptKeyParams returns all the necessary info to // construct the TLF crypt key for the given key generation, // user, and device (identified by its crypt public key), or // false if not found. This returns an error if the TLF is // public. GetTLFCryptKeyParams( keyGen KeyGen, user keybase1.UID, key kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFEphemeralPublicKey, EncryptedTLFCryptKeyClientHalf, TLFCryptKeyServerHalfID, bool, error) // StoresHistoricTLFCryptKeys returns whether or not history keys are // symmetrically encrypted; if not, they're encrypted per-device. StoresHistoricTLFCryptKeys() bool // GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given // generation using the current generation's TLFCryptKey. GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen, currentKey kbfscrypto.TLFCryptKey) ( kbfscrypto.TLFCryptKey, error) } type encryptionKeyGetter interface { // GetTLFCryptKeyForEncryption gets the crypt key to use for // encryption (i.e., with the latest key generation) for the // TLF with the given metadata. GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) ( kbfscrypto.TLFCryptKey, error) } // KeyManager fetches and constructs the keys needed for KBFS file // operations. type KeyManager interface { encryptionKeyGetter // GetTLFCryptKeyForMDDecryption gets the crypt key to use for the // TLF with the given metadata to decrypt the private portion of // the metadata. It finds the appropriate key from mdWithKeys // (which in most cases is the same as mdToDecrypt) if it's not // already cached. GetTLFCryptKeyForMDDecryption(ctx context.Context, kmdToDecrypt, kmdWithKeys KeyMetadata) ( kbfscrypto.TLFCryptKey, error) // GetTLFCryptKeyForBlockDecryption gets the crypt key to use // for the TLF with the given metadata to decrypt the block // pointed to by the given pointer. GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error) // GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations // for current devices. keys contains crypt keys from all generations, in // order, starting from FirstValidKeyGen. 
GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd KeyMetadata) ( keys []kbfscrypto.TLFCryptKey, err error) // Rekey checks the given MD object, if it is a private TLF, // against the current set of device keys for all valid // readers and writers. If there are any new devices, it // updates all existing key generations to include the new // devices. If there are devices that have been removed, it // creates a new epoch of keys for the TLF. If no devices // have changed, or if there was an error, it returns false. // Otherwise, it returns true. If a new key generation is // added the second return value points to this new key. This // is to allow for caching of the TLF crypt key only after a // successful merged write of the metadata. Otherwise we could // prematurely pollute the key cache. // // If the given MD object is a public TLF, it simply updates // the TLF's handle with any newly-resolved writers. // // If promptPaper is set, prompts for any unlocked paper keys. // promptPaper shouldn't be set if md is for a public TLF. Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) ( bool, *kbfscrypto.TLFCryptKey, error) } // Reporter exports events (asynchronously) to any number of sinks type Reporter interface { // ReportErr records that a given error happened. ReportErr(ctx context.Context, tlfName CanonicalTlfName, public bool, mode ErrorModeType, err error) // AllKnownErrors returns all errors known to this Reporter. AllKnownErrors() []ReportedError // Notify sends the given notification to any sink. Notify(ctx context.Context, notification *keybase1.FSNotification) // NotifySyncStatus sends the given path sync status to any sink. NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus) // Shutdown frees any resources allocated by a Reporter. Shutdown() } // MDCache gets and puts plaintext top-level metadata into the cache. type MDCache interface { // Get gets the metadata object associated with the given TlfID, // revision number, and branch ID (NullBranchID for merged MD). Get(tlf TlfID, rev MetadataRevision, bid BranchID) (ImmutableRootMetadata, error) // Put stores the metadata object. Put(md ImmutableRootMetadata) error // Delete removes the given metadata object from the cache if it exists. Delete(tlf TlfID, rev MetadataRevision, bid BranchID) // Replace replaces the entry matching the md under the old branch // ID with the new one. If the old entry doesn't exist, this is // equivalent to a Put. Replace(newRmd ImmutableRootMetadata, oldBID BranchID) error } // KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys. type KeyCache interface { // GetTLFCryptKey gets the crypt key for the given TLF. GetTLFCryptKey(TlfID, KeyGen) (kbfscrypto.TLFCryptKey, error) // PutTLFCryptKey stores the crypt key for the given TLF. PutTLFCryptKey(TlfID, KeyGen, kbfscrypto.TLFCryptKey) error } // BlockCacheLifetime denotes the lifetime of an entry in BlockCache. type BlockCacheLifetime int const ( // TransientEntry means that the cache entry may be evicted at // any time. TransientEntry BlockCacheLifetime = iota // PermanentEntry means that the cache entry must remain until // explicitly removed from the cache. PermanentEntry ) // BlockCache gets and puts plaintext dir blocks and file blocks into // a cache. These blocks are immutable and identified by their // content hash. type BlockCache interface { // Get gets the block associated with the given block ID. 
Get(ptr BlockPointer) (Block, error) // CheckForKnownPtr sees whether this cache has a transient // entry for the given file block (which must be a direct file // block containing data). Returns the full BlockPointer // associated with that ID, including key and data versions. // If no ID is known, return an uninitialized BlockPointer and // a nil error. CheckForKnownPtr(tlf TlfID, block *FileBlock) (BlockPointer, error) // Put stores the final (content-addressable) block associated // with the given block ID. If lifetime is TransientEntry, // then it is assumed that the block exists on the server and // the entry may be evicted from the cache at any time. If // lifetime is PermanentEntry, then it is assumed that the // block doesn't exist on the server and must remain in the // cache until explicitly removed. As an intermediary state, // as when a block is being sent to the server, the block may // be put into the cache both with TransientEntry and // PermanentEntry -- these are two separate entries. This is // fine, since the block should be the same. Put(ptr BlockPointer, tlf TlfID, block Block, lifetime BlockCacheLifetime) error // DeleteTransient removes the transient entry for the given // pointer from the cache, as well as any cached IDs so the block // won't be reused. DeleteTransient(ptr BlockPointer, tlf TlfID) error // DeletePermanent removes the permanent entry for the non-dirty block // associated with the given block ID from the cache. No // error is returned if no block exists for the given ID. DeletePermanent(id BlockID) error // DeleteKnownPtr removes the cached ID for the given file // block. It does not remove the block itself. DeleteKnownPtr(tlf TlfID, block *FileBlock) error } // DirtyPermChan is a channel that gets closed when the holder has // permission to write. We are forced to define it as a type due to a // bug in mockgen that can't handle return values with a chan // struct{}. type DirtyPermChan <-chan struct{} // DirtyBlockCache gets and puts plaintext dir blocks and file blocks // into a cache, which have been modified by the application and not // yet committed on the KBFS servers. They are identified by a // (potentially random) ID that may not have any relationship with // their context, along with a Branch in case the same TLF is being // modified via multiple branches. Dirty blocks are never evicted, // they must be deleted explicitly. type DirtyBlockCache interface { // Get gets the block associated with the given block ID. Returns // the dirty block for the given ID, if one exists. Get(tlfID TlfID, ptr BlockPointer, branch BranchName) (Block, error) // Put stores a dirty block currently identified by the // given block pointer and branch name. Put(tlfID TlfID, ptr BlockPointer, branch BranchName, block Block) error // Delete removes the dirty block associated with the given block // pointer and branch from the cache. No error is returned if no // block exists for the given ID. Delete(tlfID TlfID, ptr BlockPointer, branch BranchName) error // IsDirty states whether or not the block associated with the // given block pointer and branch name is dirty in this cache. IsDirty(tlfID TlfID, ptr BlockPointer, branch BranchName) bool // IsAnyDirty returns whether there are any dirty blocks in the // cache. tlfID may be ignored. IsAnyDirty(tlfID TlfID) bool // RequestPermissionToDirty is called whenever a user wants to // write data to a file. 
The caller provides an estimated number // of bytes that will become dirty -- this is difficult to know // exactly without pre-fetching all the blocks involved, but in // practice we can just use the number of bytes sent in via the // Write. It returns a channel that blocks until the cache is // ready to receive more dirty data, at which point the channel is // closed. The user must call // `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has // completed its write and called `UpdateUnsyncedBytes` for all // the exact dirty block sizes. RequestPermissionToDirty(ctx context.Context, tlfID TlfID, estimatedDirtyBytes int64) (DirtyPermChan, error) // UpdateUnsyncedBytes is called by a user, who has already been // granted permission to write, with the delta in block sizes that // were dirtied as part of the write. So for example, if a // newly-dirtied block of 20 bytes was extended by 5 bytes, they // should send 25. If on the next write (before any syncs), bytes // 10-15 of that same block were overwritten, they should send 0 // over the channel because there were no new bytes. If an // already-dirtied block is truncated, or if previously requested // bytes have now been updated more accurately in previous // requests, newUnsyncedBytes may be negative. wasSyncing should // be true if `BlockSyncStarted` has already been called for this // block. UpdateUnsyncedBytes(tlfID TlfID, newUnsyncedBytes int64, wasSyncing bool) // UpdateSyncingBytes is called when a particular block has // started syncing, or with a negative number when a block is no // longer syncing due to an error (and BlockSyncFinished will // never be called). UpdateSyncingBytes(tlfID TlfID, size int64) // BlockSyncFinished is called when a particular block has // finished syncing, though the overall sync might not yet be // complete. This lets the cache know it might be able to grant // more permission to writers. BlockSyncFinished(tlfID TlfID, size int64) // SyncFinished is called when a complete sync has completed and // its dirty blocks have been removed from the cache. This lets // the cache know it might be able to grant more permission to // writers. SyncFinished(tlfID TlfID, size int64) // ShouldForceSync returns true if the sync buffer is full enough // to force all callers to sync their data immediately. ShouldForceSync(tlfID TlfID) bool // Shutdown frees any resources associated with this instance. It // returns an error if there are any unsynced blocks. Shutdown() error } // cryptoPure contains all methods of Crypto that don't depend on // implicit state, i.e. they're pure functions of the input. type cryptoPure interface { // MakeRandomTlfID generates a dir ID using a CSPRNG. MakeRandomTlfID(isPublic bool) (TlfID, error) // MakeRandomBranchID generates a per-device branch ID using a CSPRNG. MakeRandomBranchID() (BranchID, error) // MakeMdID computes the MD ID of a RootMetadata object. // TODO: This should move to BareRootMetadata. Note though, that some mock tests // rely on it being part of the config and crypto_measured.go uses it to keep // statistics on time spent hashing. MakeMdID(md BareRootMetadata) (MdID, error) // MakeMerkleHash computes the hash of a RootMetadataSigned object // for inclusion into the KBFS Merkle tree. MakeMerkleHash(md *RootMetadataSigned) (MerkleHash, error) // MakeTemporaryBlockID generates a temporary block ID using a // CSPRNG. This is used for indirect blocks before they're // committed to the server. 
MakeTemporaryBlockID() (BlockID, error) // MakePermanentBlockID computes the permanent ID of a block // given its encoded and encrypted contents. MakePermanentBlockID(encodedEncryptedData []byte) (BlockID, error) // VerifyBlockID verifies that the given block ID is the // permanent block ID for the given encoded and encrypted // data. VerifyBlockID(encodedEncryptedData []byte, id BlockID) error // MakeBlockRefNonce generates a block reference nonce using a // CSPRNG. This is used for distinguishing different references to // the same BlockID. MakeBlockRefNonce() (BlockRefNonce, error) // MakeRandomTLFKeys generates top-level folder keys using a CSPRNG. MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey, kbfscrypto.TLFPrivateKey, kbfscrypto.TLFEphemeralPublicKey, kbfscrypto.TLFEphemeralPrivateKey, kbfscrypto.TLFCryptKey, error) // MakeRandomTLFCryptKeyServerHalf generates the server-side of a // top-level folder crypt key. MakeRandomTLFCryptKeyServerHalf() ( kbfscrypto.TLFCryptKeyServerHalf, error) // MakeRandomBlockCryptKeyServerHalf generates the server-side of // a block crypt key. MakeRandomBlockCryptKeyServerHalf() ( kbfscrypto.BlockCryptKeyServerHalf, error) // MaskTLFCryptKey returns the client-side of a top-level folder crypt key. MaskTLFCryptKey(serverHalf kbfscrypto.TLFCryptKeyServerHalf, key kbfscrypto.TLFCryptKey) ( kbfscrypto.TLFCryptKeyClientHalf, error) // UnmaskTLFCryptKey returns the top-level folder crypt key. UnmaskTLFCryptKey(serverHalf kbfscrypto.TLFCryptKeyServerHalf, clientHalf kbfscrypto.TLFCryptKeyClientHalf) ( kbfscrypto.TLFCryptKey, error) // UnmaskBlockCryptKey returns the block crypt key. UnmaskBlockCryptKey(serverHalf kbfscrypto.BlockCryptKeyServerHalf, tlfCryptKey kbfscrypto.TLFCryptKey) ( kbfscrypto.BlockCryptKey, error) // Verify verifies that sig matches msg being signed with the // private key that corresponds to verifyingKey. Verify(msg []byte, sigInfo kbfscrypto.SignatureInfo) error // EncryptTLFCryptKeyClientHalf encrypts a TLFCryptKeyClientHalf // using both a TLF's ephemeral private key and a device pubkey. EncryptTLFCryptKeyClientHalf( privateKey kbfscrypto.TLFEphemeralPrivateKey, publicKey kbfscrypto.CryptPublicKey, clientHalf kbfscrypto.TLFCryptKeyClientHalf) ( EncryptedTLFCryptKeyClientHalf, error) // EncryptPrivateMetadata encrypts a PrivateMetadata object. EncryptPrivateMetadata( pmd *PrivateMetadata, key kbfscrypto.TLFCryptKey) ( EncryptedPrivateMetadata, error) // DecryptPrivateMetadata decrypts a PrivateMetadata object. DecryptPrivateMetadata( encryptedPMD EncryptedPrivateMetadata, key kbfscrypto.TLFCryptKey) (*PrivateMetadata, error) // EncryptBlock encrypts a block. plainSize is the size of the encoded // block; EncryptBlock() must guarantee that plainSize <= // len(encryptedBlock). EncryptBlock(block Block, key kbfscrypto.BlockCryptKey) ( plainSize int, encryptedBlock EncryptedBlock, err error) // DecryptBlock decrypts a block. Similar to EncryptBlock(), // DecryptBlock() must guarantee that (size of the decrypted // block) <= len(encryptedBlock). DecryptBlock(encryptedBlock EncryptedBlock, key kbfscrypto.BlockCryptKey, block Block) error // GetTLFCryptKeyServerHalfID creates a unique ID for this particular // kbfscrypto.TLFCryptKeyServerHalf. GetTLFCryptKeyServerHalfID( user keybase1.UID, deviceKID keybase1.KID, serverHalf kbfscrypto.TLFCryptKeyServerHalf) ( TLFCryptKeyServerHalfID, error) // VerifyTLFCryptKeyServerHalfID verifies the ID is the proper HMAC result. 
VerifyTLFCryptKeyServerHalfID(serverHalfID TLFCryptKeyServerHalfID, user keybase1.UID, deviceKID keybase1.KID, serverHalf kbfscrypto.TLFCryptKeyServerHalf) error // EncryptMerkleLeaf encrypts a Merkle leaf node with the TLFPublicKey. EncryptMerkleLeaf(leaf MerkleLeaf, pubKey kbfscrypto.TLFPublicKey, nonce *[24]byte, ePrivKey kbfscrypto.TLFEphemeralPrivateKey) ( EncryptedMerkleLeaf, error) // DecryptMerkleLeaf decrypts a Merkle leaf node with the TLFPrivateKey. DecryptMerkleLeaf(encryptedLeaf EncryptedMerkleLeaf, privKey kbfscrypto.TLFPrivateKey, nonce *[24]byte, ePubKey kbfscrypto.TLFEphemeralPublicKey) (*MerkleLeaf, error) // MakeTLFWriterKeyBundleID hashes a TLFWriterKeyBundleV3 to create an ID. MakeTLFWriterKeyBundleID(wkb *TLFWriterKeyBundleV3) (TLFWriterKeyBundleID, error) // MakeTLFReaderKeyBundleID hashes a TLFReaderKeyBundleV3 to create an ID. MakeTLFReaderKeyBundleID(rkb *TLFReaderKeyBundleV3) (TLFReaderKeyBundleID, error) // EncryptTLFCryptKeys encrypts an array of historic TLFCryptKeys. EncryptTLFCryptKeys(oldKeys []kbfscrypto.TLFCryptKey, key kbfscrypto.TLFCryptKey) ( EncryptedTLFCryptKeys, error) // DecryptTLFCryptKeys decrypts an array of historic TLFCryptKeys. DecryptTLFCryptKeys( encKeys EncryptedTLFCryptKeys, key kbfscrypto.TLFCryptKey) ( []kbfscrypto.TLFCryptKey, error) } // Duplicate kbfscrypto.Signer here to work around gomock's // limitations. type cryptoSigner interface { Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error) SignToString(context.Context, []byte) (string, error) } // Crypto signs, verifies, encrypts, and decrypts stuff. type Crypto interface { cryptoPure cryptoSigner // DecryptTLFCryptKeyClientHalf decrypts a // kbfscrypto.TLFCryptKeyClientHalf using the current device's // private key and the TLF's ephemeral public key. DecryptTLFCryptKeyClientHalf(ctx context.Context, publicKey kbfscrypto.TLFEphemeralPublicKey, encryptedClientHalf EncryptedTLFCryptKeyClientHalf) ( kbfscrypto.TLFCryptKeyClientHalf, error) // DecryptTLFCryptKeyClientHalfAny decrypts one of the // kbfscrypto.TLFCryptKeyClientHalf using the available // private keys and the ephemeral public key. If promptPaper // is true, the service will prompt the user for any unlocked // paper keys. DecryptTLFCryptKeyClientHalfAny(ctx context.Context, keys []EncryptedTLFCryptKeyClientAndEphemeral, promptPaper bool) ( kbfscrypto.TLFCryptKeyClientHalf, int, error) // Shutdown frees any resources associated with this instance. Shutdown() } // MDOps gets and puts root metadata to an MDServer. On a get, it // verifies the metadata is signed by the metadata's signing key. type MDOps interface { // GetForHandle returns the current metadata object // corresponding to the given top-level folder's handle and // merge status, if the logged-in user has read permission on // the folder. It creates the folder if one doesn't exist // yet, and the logged-in user has permission to do so. GetForHandle( ctx context.Context, handle *TlfHandle, mStatus MergeStatus) ( TlfID, ImmutableRootMetadata, error) // GetForTLF returns the current metadata object // corresponding to the given top-level folder, if the logged-in // user has read permission on the folder. GetForTLF(ctx context.Context, id TlfID) (ImmutableRootMetadata, error) // GetUnmergedForTLF is the same as the above but for unmerged // metadata. GetUnmergedForTLF(ctx context.Context, id TlfID, bid BranchID) ( ImmutableRootMetadata, error) // GetRange returns a range of metadata objects corresponding to // the passed revision numbers (inclusive). 
GetRange(ctx context.Context, id TlfID, start, stop MetadataRevision) ( []ImmutableRootMetadata, error) // GetUnmergedRange is the same as the above but for unmerged // metadata history (inclusive). GetUnmergedRange(ctx context.Context, id TlfID, bid BranchID, start, stop MetadataRevision) ([]ImmutableRootMetadata, error) // Put stores the metadata object for the given // top-level folder. Put(ctx context.Context, rmd *RootMetadata) (MdID, error) // PutUnmerged is the same as the above but for unmerged // metadata history. PutUnmerged(ctx context.Context, rmd *RootMetadata) (MdID, error) // PruneBranch prunes all unmerged history for the given TLF // branch. PruneBranch(ctx context.Context, id TlfID, bid BranchID) error // GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF, // which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it // entered into a conflicting state. GetLatestHandleForTLF(ctx context.Context, id TlfID) ( BareTlfHandle, error) } // KeyOps fetches server-side key halves from the key server. type KeyOps interface { // GetTLFCryptKeyServerHalf gets a server-side key half for a // device given the key half ID. GetTLFCryptKeyServerHalf(ctx context.Context, serverHalfID TLFCryptKeyServerHalfID, cryptPublicKey kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFCryptKeyServerHalf, error) // PutTLFCryptKeyServerHalves stores a server-side key halves for a // set of users and devices. PutTLFCryptKeyServerHalves(ctx context.Context, serverKeyHalves map[keybase1.UID]map[keybase1.KID]kbfscrypto.TLFCryptKeyServerHalf) error // DeleteTLFCryptKeyServerHalf deletes a server-side key half for a // device given the key half ID. DeleteTLFCryptKeyServerHalf(ctx context.Context, uid keybase1.UID, kid keybase1.KID, serverHalfID TLFCryptKeyServerHalfID) error } // BlockOps gets and puts data blocks to a BlockServer. It performs // the necessary crypto operations on each block. type BlockOps interface { // Get gets the block associated with the given block pointer // (which belongs to the TLF with the given key metadata), // decrypts it if necessary, and fills in the provided block // object with its contents, if the logged-in user has read // permission for that block. Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer, block Block) error // Ready turns the given block (which belongs to the TLF with // the given key metadata) into encoded (and encrypted) data, // and calculates its ID and size, so that we can do a bunch // of block puts in parallel for every write. Ready() must // guarantee that plainSize <= readyBlockData.QuotaSize(). Ready(ctx context.Context, kmd KeyMetadata, block Block) ( id BlockID, plainSize int, readyBlockData ReadyBlockData, err error) // Delete instructs the server to delete the given block references. // It returns the number of not-yet deleted references to // each block reference Delete(ctx context.Context, tlfID TlfID, ptrs []BlockPointer) ( liveCounts map[BlockID]int, err error) // Archive instructs the server to mark the given block references // as "archived"; that is, they are not being used in the current // view of the folder, and shouldn't be served to anyone other // than folder writers. Archive(ctx context.Context, tlfID TlfID, ptrs []BlockPointer) error } // Duplicate kbfscrypto.AuthTokenRefreshHandler here to work around // gomock's limitations. type authTokenRefreshHandler interface { RefreshAuthToken(context.Context) } // MDServer gets and puts metadata for each top-level directory. 
The // instantiation should be able to fetch session/user details via KBPKI. On a // put, the server is responsible for 1) ensuring the user has appropriate // permissions for whatever modifications were made; 2) ensuring that // LastModifyingWriter and LastModifyingUser are updated appropriately; and 3) // detecting conflicting writes based on the previous root block ID (i.e., when // it supports strict consistency). On a get, it verifies the logged-in user // has read permissions. // // TODO: Add interface for searching by time type MDServer interface { authTokenRefreshHandler // GetForHandle returns the current (signed/encrypted) metadata // object corresponding to the given top-level folder's handle, if // the logged-in user has read permission on the folder. It // creates the folder if one doesn't exist yet, and the logged-in // user has permission to do so. GetForHandle(ctx context.Context, handle BareTlfHandle, mStatus MergeStatus) (TlfID, *RootMetadataSigned, error) // GetForTLF returns the current (signed/encrypted) metadata object // corresponding to the given top-level folder, if the logged-in // user has read permission on the folder. GetForTLF(ctx context.Context, id TlfID, bid BranchID, mStatus MergeStatus) ( *RootMetadataSigned, error) // GetRange returns a range of (signed/encrypted) metadata objects // corresponding to the passed revision numbers (inclusive). GetRange(ctx context.Context, id TlfID, bid BranchID, mStatus MergeStatus, start, stop MetadataRevision) ([]*RootMetadataSigned, error) // Put stores the (signed/encrypted) metadata object for the given // top-level folder. Note: If the unmerged bit is set in the metadata // block's flags bitmask it will be appended to the unmerged per-device // history. Put(ctx context.Context, rmds *RootMetadataSigned, extra ExtraMetadata) error // PruneBranch prunes all unmerged history for the given TLF branch. PruneBranch(ctx context.Context, id TlfID, bid BranchID) error // RegisterForUpdate tells the MD server to inform the caller when // there is a merged update with a revision number greater than // currHead, which did NOT originate from this same MD server // session. This method returns a chan which can receive only a // single error before it's closed. If the received err is nil, // then there is updated MD ready to fetch which didn't originate // locally; if it is non-nil, then the previous registration // cannot send the next notification (e.g., the connection to the // MD server may have failed). In either case, the caller must // re-register to get a new chan that can receive future update // notifications. RegisterForUpdate(ctx context.Context, id TlfID, currHead MetadataRevision) (<-chan error, error) // CheckForRekeys initiates the rekey checking process on the // server. The server is allowed to delay this request, and so it // returns a channel for returning the error. Actual rekey // requests are expected to come in asynchronously. CheckForRekeys(ctx context.Context) <-chan error // TruncateLock attempts to take the history truncation lock for // this folder, for a TTL defined by the server. Returns true if // the lock was successfully taken. TruncateLock(ctx context.Context, id TlfID) (bool, error) // TruncateUnlock attempts to release the history truncation lock // for this folder. Returns true if the lock was successfully // released. TruncateUnlock(ctx context.Context, id TlfID) (bool, error) // DisableRekeyUpdatesForTesting disables processing rekey updates // received from the mdserver while testing. 
DisableRekeyUpdatesForTesting() // Shutdown is called to shut down an MDServer connection. Shutdown() // IsConnected returns whether the MDServer is connected. IsConnected() bool // GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF, // which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it // entered into a conflicting state. For the highest level of confidence, the caller // should verify the mapping with a Merkle tree lookup. GetLatestHandleForTLF(ctx context.Context, id TlfID) ( BareTlfHandle, error) // OffsetFromServerTime is the current estimate for how off our // local clock is from the mdserver clock. Add this to any // mdserver-provided timestamps to get the "local" time of the // corresponding event. If the returned bool is false, then we // don't have a current estimate for the offset. OffsetFromServerTime() (time.Duration, bool) // GetKeyBundles returns the key bundles for the given key bundle IDs. GetKeyBundles(ctx context.Context, wkbID TLFWriterKeyBundleID, rkbID TLFReaderKeyBundleID) ( *TLFWriterKeyBundleV3, *TLFReaderKeyBundleV3, error) } type mdServerLocal interface { MDServer addNewAssertionForTest( uid keybase1.UID, newAssertion keybase1.SocialAssertion) error getCurrentMergedHeadRevision(ctx context.Context, id TlfID) ( rev MetadataRevision, err error) isShutdown() bool copy(config mdServerLocalConfig) mdServerLocal } // BlockServer gets and puts opaque data blocks. The instantiation // should be able to fetch session/user details via KBPKI. On a // put/delete, the server is responsible for: 1) checking that the ID // matches the hash of the buffer; and 2) enforcing writer quotas. type BlockServer interface { authTokenRefreshHandler // Get gets the (encrypted) block data associated with the given // block ID and context, uses the provided block key to decrypt // the block, and fills in the provided block object with its // contents, if the logged-in user has read permission for that // block. Get(ctx context.Context, tlfID TlfID, id BlockID, context BlockContext) ( []byte, kbfscrypto.BlockCryptKeyServerHalf, error) // Put stores the (encrypted) block data under the given ID and // context on the server, along with the server half of the block // key. context should contain a BlockRefNonce of zero. There // will be an initial reference for this block for the given // context. // // Put should be idempotent, although it should also return an // error if, for a given ID, any of the other arguments differ // from previous Put calls with the same ID. // // If this returns a BServerErrorOverQuota, with Throttled=false, // the caller can treat it as informational and otherwise ignore // the error. Put(ctx context.Context, tlfID TlfID, id BlockID, context BlockContext, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error // AddBlockReference adds a new reference to the given block, // defined by the given context (which should contain a non-zero // BlockRefNonce). (Contexts with a BlockRefNonce of zero should // be used when putting the block for the first time via Put().) // Returns a BServerErrorBlockNonExistent if id is unknown within // this folder. // // AddBlockReference should be idempotent, although it should // also return an error if, for a given ID and refnonce, any // of the other fields of context differ from previous // AddBlockReference calls with the same ID and refnonce. 
// // If this returns a BServerErrorOverQuota, with Throttled=false, // the caller can treat it as informational and otherwise ignore // the error. AddBlockReference(ctx context.Context, tlfID TlfID, id BlockID, context BlockContext) error // RemoveBlockReferences removes the references to the given block // ID defined by the given contexts. If no references to the block // remain after this call, the server is allowed to delete the // corresponding block permanently. If the reference defined by // the count has already been removed, the call is a no-op. // It returns the number of remaining not-yet-deleted references after this // reference has been removed RemoveBlockReferences(ctx context.Context, tlfID TlfID, contexts map[BlockID][]BlockContext) (liveCounts map[BlockID]int, err error) // ArchiveBlockReferences marks the given block references as // "archived"; that is, they are not being used in the current // view of the folder, and shouldn't be served to anyone other // than folder writers. // // For a given ID/refnonce pair, ArchiveBlockReferences should // be idempotent, although it should also return an error if // any of the other fields of the context differ from previous // calls with the same ID/refnonce pair. ArchiveBlockReferences(ctx context.Context, tlfID TlfID, contexts map[BlockID][]BlockContext) error // Shutdown is called to shutdown a BlockServer connection. Shutdown() // GetUserQuotaInfo returns the quota for the user. GetUserQuotaInfo(ctx context.Context) (info *UserQuotaInfo, err error) } type blockRefLocalStatus int const ( liveBlockRef blockRefLocalStatus = 1 archivedBlockRef = 2 ) // blockServerLocal is the interface for BlockServer implementations // that store data locally. type blockServerLocal interface { BlockServer // getAll returns all the known block references, and should only be // used during testing. getAll(ctx context.Context, tlfID TlfID) ( map[BlockID]map[BlockRefNonce]blockRefLocalStatus, error) } // BlockSplitter decides when a file or directory block needs to be split type BlockSplitter interface { // CopyUntilSplit copies data into the block until we reach the // point where we should split, but only if writing to the end of // the last block. If this is writing into the middle of a file, // just copy everything that will fit into the block, and assume // that block boundaries will be fixed later. Return how much was // copied. CopyUntilSplit( block *FileBlock, lastBlock bool, data []byte, off int64) int64 // CheckSplit, given a block, figures out whether it ends at the // right place. If so, return 0. If not, return either the // offset in the block where it should be split, or -1 if more // bytes from the next block should be appended. CheckSplit(block *FileBlock) int64 // ShouldEmbedBlockChanges decides whether we should keep the // block changes embedded in the MD or not. ShouldEmbedBlockChanges(bc *BlockChanges) bool } // KeyServer fetches/writes server-side key halves from/to the key server. type KeyServer interface { // GetTLFCryptKeyServerHalf gets a server-side key half for a // device given the key half ID. GetTLFCryptKeyServerHalf(ctx context.Context, serverHalfID TLFCryptKeyServerHalfID, cryptPublicKey kbfscrypto.CryptPublicKey) ( kbfscrypto.TLFCryptKeyServerHalf, error) // PutTLFCryptKeyServerHalves stores a server-side key halves for a // set of users and devices. 
PutTLFCryptKeyServerHalves(ctx context.Context, serverKeyHalves map[keybase1.UID]map[keybase1.KID]kbfscrypto.TLFCryptKeyServerHalf) error // DeleteTLFCryptKeyServerHalf deletes a server-side key half for a // device given the key half ID. DeleteTLFCryptKeyServerHalf(ctx context.Context, uid keybase1.UID, kid keybase1.KID, serverHalfID TLFCryptKeyServerHalfID) error // Shutdown is called to free any KeyServer resources. Shutdown() } // NodeChange represents a change made to a node as part of an atomic // file system operation. type NodeChange struct { Node Node // Basenames of entries added/removed. DirUpdated []string FileUpdated []WriteRange } // Observer can be notified that there is an available update for a // given directory. The notification callbacks should not block, or // make any calls to the Notifier interface. Nodes passed to the // observer should not be held past the end of the notification // callback. type Observer interface { // LocalChange announces that the file at this Node has been // updated locally, but not yet saved at the server. LocalChange(ctx context.Context, node Node, write WriteRange) // BatchChanges announces that the nodes have all been updated // together atomically. Each NodeChange in changes affects the // same top-level folder and branch. BatchChanges(ctx context.Context, changes []NodeChange) // TlfHandleChange announces that the handle of the corresponding // folder branch has changed, likely due to previously-unresolved // assertions becoming resolved. This indicates that the listener // should switch over any cached paths for this folder-branch to // the new name. Nodes that were acquired under the old name will // still continue to work, but new lookups on the old name may // either encounter alias errors or entirely new TLFs (in the case // of conflicts). TlfHandleChange(ctx context.Context, newHandle *TlfHandle) } // Notifier notifies registrants of directory changes type Notifier interface { // RegisterForChanges declares that the given Observer wants to // subscribe to updates for the given top-level folders. RegisterForChanges(folderBranches []FolderBranch, obs Observer) error // UnregisterFromChanges declares that the given Observer no // longer wants to subscribe to updates for the given top-level // folders. UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error } // Clock is an interface for getting the current time type Clock interface { // Now returns the current time. Now() time.Time } // ConflictRenamer deals with names for conflicting directory entries. type ConflictRenamer interface { // ConflictRename returns the appropriately modified filename. ConflictRename(op op, original string) string } // Config collects all the singleton instance instantiations needed to // run KBFS in one place. The methods below are self-explanatory and // do not require comments. 
type Config interface { KBFSOps() KBFSOps SetKBFSOps(KBFSOps) KBPKI() KBPKI SetKBPKI(KBPKI) KeyManager() KeyManager SetKeyManager(KeyManager) Reporter() Reporter SetReporter(Reporter) MDCache() MDCache SetMDCache(MDCache) KeyCache() KeyCache SetKeyCache(KeyCache) BlockCache() BlockCache SetBlockCache(BlockCache) DirtyBlockCache() DirtyBlockCache SetDirtyBlockCache(DirtyBlockCache) Crypto() Crypto SetCrypto(Crypto) Codec() kbfscodec.Codec SetCodec(kbfscodec.Codec) MDOps() MDOps SetMDOps(MDOps) KeyOps() KeyOps SetKeyOps(KeyOps) BlockOps() BlockOps SetBlockOps(BlockOps) MDServer() MDServer SetMDServer(MDServer) BlockServer() BlockServer SetBlockServer(BlockServer) KeyServer() KeyServer SetKeyServer(KeyServer) KeybaseService() KeybaseService SetKeybaseService(KeybaseService) BlockSplitter() BlockSplitter SetBlockSplitter(BlockSplitter) Notifier() Notifier SetNotifier(Notifier) Clock() Clock SetClock(Clock) ConflictRenamer() ConflictRenamer SetConflictRenamer(ConflictRenamer) MetadataVersion() MetadataVer DataVersion() DataVer RekeyQueue() RekeyQueue SetRekeyQueue(RekeyQueue) // ReqsBufSize indicates the number of read or write operations // that can be buffered per folder. ReqsBufSize() int // MaxFileBytes indicates the maximum supported plaintext size of // a file in bytes. MaxFileBytes() uint64 // MaxNameBytes indicates the maximum supported size of a // directory entry name in bytes. MaxNameBytes() uint32 // MaxDirBytes indicates the maximum supported plaintext size of a // directory in bytes. MaxDirBytes() uint64 // DoBackgroundFlushes says whether we should periodically try to // flush dirty files, even without a sync from the user. Should // be true except during some testing. DoBackgroundFlushes() bool SetDoBackgroundFlushes(bool) // RekeyWithPromptWaitTime indicates how long to wait, after // setting the rekey bit, before prompting for a paper key. RekeyWithPromptWaitTime() time.Duration // DelayedCancellationGracePeriod specifies a grace period for which a // delayed cancellation waits before it actually cancels the context. This is // useful for giving a critical portion of a slow remote operation some extra // time to finish in an effort to avoid conflicts. An example is an O_EXCL // Create call interrupted by an ALRM signal that actually makes it to the // server, while the application assumes it didn't, since EINTR is returned. A // delayed cancellation allows us to distinguish between a successful cancel // (where the remote operation didn't make it to the server) and a failed // cancel (where the remote operation made it to the server). However, the // optimal value of this depends on the network conditions. A long grace // period under really good network conditions would just unnecessarily slow // down Ctrl-C. // // TODO: make this adaptive and self-change over time based on network // conditions. DelayedCancellationGracePeriod() time.Duration SetDelayedCancellationGracePeriod(time.Duration) // QuotaReclamationPeriod indicates how often each TLF // should check for quota to reclaim. If the Duration.Seconds() // == 0, quota reclamation should not run automatically. QuotaReclamationPeriod() time.Duration // QuotaReclamationMinUnrefAge indicates the minimum time a block // must have been unreferenced before it can be reclaimed. QuotaReclamationMinUnrefAge() time.Duration // QuotaReclamationMinHeadAge indicates the minimum age of the // most recently merged MD update before we can run reclamation, // to avoid conflicting with a currently active writer. 
QuotaReclamationMinHeadAge() time.Duration // ResetCaches clears and re-initializes all data and key caches. ResetCaches() MakeLogger(module string) logger.Logger SetLoggerMaker(func(module string) logger.Logger) // MetricsRegistry may be nil, which should be interpreted as // not using metrics at all. (i.e., as if UseNilMetrics were // set). This differs from how go-metrics treats nil Registry // objects, which is to use the default registry. MetricsRegistry() metrics.Registry SetMetricsRegistry(metrics.Registry) // TLFValidDuration is the time TLFs are valid before identification needs to be redone. TLFValidDuration() time.Duration // SetTLFValidDuration sets TLFValidDuration. SetTLFValidDuration(time.Duration) // Shutdown is called to free config resources. Shutdown() error // CheckStateOnShutdown tells the caller whether or not it is safe // to check the state of the system on shutdown. CheckStateOnShutdown() bool } // NodeCache holds Nodes, and allows libkbfs to update them when // things change about the underlying KBFS blocks. It is probably // most useful to instantiate this on a per-folder-branch basis, so // that it can create a Path with the correct DirId and Branch name. type NodeCache interface { // GetOrCreate either makes a new Node for the given // BlockPointer, or returns an existing one. TODO: If we ever // support hard links, we will have to revisit the "name" and // "parent" parameters here. name must not be empty. Returns // an error if parent cannot be found. GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error) // Get returns the Node associated with the given ptr if one // already exists. Otherwise, it returns nil. Get(ref blockRef) Node // UpdatePointer updates the BlockPointer for the corresponding // Node. NodeCache ignores this call when oldRef is not cached in // any Node. UpdatePointer(oldRef blockRef, newPtr BlockPointer) // Move swaps the parent node for the corresponding Node, and // updates the node's name. NodeCache ignores the call when ptr // is not cached. Returns an error if newParent cannot be found. // If newParent is nil, it treats the ptr's corresponding node as // being unlinked from the old parent completely. Move(ref blockRef, newParent Node, newName string) error // Unlink set the corresponding node's parent to nil and caches // the provided path in case the node is still open. NodeCache // ignores the call when ptr is not cached. The path is required // because the caller may have made changes to the parent nodes // already that shouldn't be reflected in the cached path. Unlink(ref blockRef, oldPath path) // PathFromNode creates the path up to a given Node. PathFromNode(node Node) path } // fileBlockDeepCopier fetches a file block, makes a deep copy of it // (duplicating pointer for any indirect blocks) and generates a new // random temporary block ID for it. It returns the new BlockPointer, // and internally saves the block for future uses. type fileBlockDeepCopier func(context.Context, string, BlockPointer) ( BlockPointer, error) // crAction represents a specific action to take as part of the // conflict resolution process. type crAction interface { // swapUnmergedBlock should be called before do(), and if it // returns true, the caller must use the merged block // corresponding to the returned BlockPointer instead of // unmergedBlock when calling do(). If BlockPointer{} is zeroPtr // (and true is returned), just swap in the regular mergedBlock. 
swapUnmergedBlock(unmergedChains *crChains, mergedChains *crChains, unmergedBlock *DirBlock) (bool, BlockPointer, error) // do modifies the given merged block in place to resolve the // conflict, and potentially uses the provided blockCopyFetchers to // obtain copies of other blocks (along with new BlockPointers) // when requiring a block copy. do(ctx context.Context, unmergedCopier fileBlockDeepCopier, mergedCopier fileBlockDeepCopier, unmergedBlock *DirBlock, mergedBlock *DirBlock) error // updateOps potentially modifies, in place, the slices of // unmerged and merged operations stored in the corresponding // crChains for the given unmerged and merged most recent // pointers. Eventually, the "unmerged" ops will be pushed as // part of an MD update, and so should contain any operations // necessary to fully merge the unmerged data, including any // conflict resolution. The "merged" ops will be played through // locally, to notify any caches about the newly-obtained merged // data (and any changes to local data that were required as part // of conflict resolution, such as renames). A few things to note: // * A particular action's updateOps method may be called more than // once for different sets of chains; however, it should only add // new directory operations (like create/rm/rename) into directory // chains. // * updateOps doesn't necessarily result in correct BlockPointers within // each of those ops; that must happen in a later phase. // * mergedBlock can be nil if the chain is for a file. updateOps(unmergedMostRecent BlockPointer, mergedMostRecent BlockPointer, unmergedBlock *DirBlock, mergedBlock *DirBlock, unmergedChains *crChains, mergedChains *crChains) error // String returns a string representation for this crAction, used // for debugging. String() string } // RekeyQueue is a managed queue of folders needing some rekey action taken upon them // by the current client. type RekeyQueue interface { // Enqueue enqueues a folder for rekey action. Enqueue(TlfID) <-chan error // IsRekeyPending returns true if the given folder is in the rekey queue. IsRekeyPending(TlfID) bool // GetRekeyChannel will return any rekey completion channel (if pending). GetRekeyChannel(id TlfID) <-chan error // Clear cancels all pending rekey actions and clears the queue. Clear() // Wait waits for all queued rekeys to finish. Wait(ctx context.Context) error } // BareRootMetadata is a read-only interface to the bare serializable MD that // is signed by the reader or writer. type BareRootMetadata interface { // TlfID returns the ID of the TLF this BareRootMetadata is for. TlfID() TlfID // LatestKeyGeneration returns the most recent key generation in this // BareRootMetadata, or PublicKeyGen if this TLF is public. LatestKeyGeneration() KeyGen // IsValidRekeyRequest returns true if the current block is a simple rekey wrt // the passed block. IsValidRekeyRequest(codec kbfscodec.Codec, prevMd BareRootMetadata, user keybase1.UID, prevExtra, extra ExtraMetadata) (bool, error) // MergedStatus returns the status of this update -- has it been // merged into the main folder or not? MergedStatus() MergeStatus // IsRekeySet returns true if the rekey bit is set. IsRekeySet() bool // IsWriterMetadataCopiedSet returns true if the bit is set indicating // the writer metadata was copied. IsWriterMetadataCopiedSet() bool // IsFinal returns true if this is the last metadata block for a given // folder. This is only expected to be set for folder resets. 
IsFinal() bool // IsWriter returns whether or not the user+device is an authorized writer. IsWriter(user keybase1.UID, deviceKID keybase1.KID, extra ExtraMetadata) bool // IsReader returns whether or not the user+device is an authorized reader. IsReader(user keybase1.UID, deviceKID keybase1.KID, extra ExtraMetadata) bool // DeepCopy returns a deep copy of the underlying data structure. DeepCopy(codec kbfscodec.Codec) (BareRootMetadata, error) // MakeSuccessorCopy returns a newly constructed successor copy to this metadata revision. // It differs from DeepCopy in that it can perform an up conversion to a new metadata // version. MakeSuccessorCopy(codec kbfscodec.Codec) (BareRootMetadata, error) // CheckValidSuccessor makes sure the given BareRootMetadata is a valid // successor to the current one, and returns an error otherwise. CheckValidSuccessor(currID MdID, nextMd BareRootMetadata) error // CheckValidSuccessorForServer is like CheckValidSuccessor but with // server-specific error messages. CheckValidSuccessorForServer(currID MdID, nextMd BareRootMetadata) error // MakeBareTlfHandle makes a BareTlfHandle for this // BareRootMetadata. Should be used only by servers and MDOps. MakeBareTlfHandle(extra ExtraMetadata) (BareTlfHandle, error) // TlfHandleExtensions returns a list of handle extensions associated with the TLf. TlfHandleExtensions() (extensions []TlfHandleExtension) // GetDeviceKIDs returns the KIDs (of // kbfscrypto.CryptPublicKeys) for all known devices for the // given user at the given key generation, if any. Returns an // error if the TLF is public, or if the given key generation // is invalid. GetDeviceKIDs(keyGen KeyGen, user keybase1.UID, extra ExtraMetadata) ( []keybase1.KID, error) // HasKeyForUser returns whether or not the given user has keys for at // least one device at the given key generation. Returns false if the // TLF is public, or if the given key generation is invalid. Equivalent to: // // kids, err := GetDeviceKIDs(keyGen, user) // return (err == nil) && (len(kids) > 0) HasKeyForUser(keyGen KeyGen, user keybase1.UID, extra ExtraMetadata) bool // GetTLFCryptKeyParams returns all the necessary info to construct // the TLF crypt key for the given key generation, user, and device // (identified by its crypt public key), or false if not found. This // returns an error if the TLF is public. GetTLFCryptKeyParams(keyGen KeyGen, user keybase1.UID, key kbfscrypto.CryptPublicKey, extra ExtraMetadata) ( kbfscrypto.TLFEphemeralPublicKey, EncryptedTLFCryptKeyClientHalf, TLFCryptKeyServerHalfID, bool, error) // IsValidAndSigned verifies the BareRootMetadata, checks the // writer signature, and returns an error if a problem was // found. This should be the first thing checked on a BRMD // retrieved from an untrusted source, and then the signing // user and key should be validated, either by comparing to // the current device key (using IsLastModifiedBy), or by // checking with KBPKI. IsValidAndSigned(codec kbfscodec.Codec, crypto cryptoPure, extra ExtraMetadata) error // IsLastModifiedBy verifies that the BareRootMetadata is // written by the given user and device (identified by the KID // of the device verifying key), and returns an error if not. IsLastModifiedBy(uid keybase1.UID, key kbfscrypto.VerifyingKey) error // LastModifyingWriter return the UID of the last user to modify the writer metadata. LastModifyingWriter() keybase1.UID // LastModifyingWriterKID returns the KID of the last device to modify the writer metadata. 
LastModifyingWriterKID() keybase1.KID // GetLastModifyingUser returns the UID of the last user to modify any of the metadata. GetLastModifyingUser() keybase1.UID // RefBytes returns the number of newly referenced bytes introduced by this revision of metadata. RefBytes() uint64 // UnrefBytes returns the number of newly unreferenced bytes introduced by this revision of metadata. UnrefBytes() uint64 // DiskUsage returns the estimated disk usage for the folder as of this revision of metadata. DiskUsage() uint64 // RevisionNumber returns the revision number associated with this metadata structure. RevisionNumber() MetadataRevision // BID returns the per-device branch ID associated with this metadata revision. BID() BranchID // GetPrevRoot returns the hash of the previous metadata revision. GetPrevRoot() MdID // IsUnmergedSet returns true if the unmerged bit is set. IsUnmergedSet() bool // GetSerializedPrivateMetadata returns the serialized private metadata as a byte slice. GetSerializedPrivateMetadata() []byte // GetSerializedWriterMetadata serializes the underlying writer metadata and returns the result. GetSerializedWriterMetadata(codec kbfscodec.Codec) ([]byte, error) // GetWriterMetadataSigInfo returns the signature info associated with the writer metadata. GetWriterMetadataSigInfo() kbfscrypto.SignatureInfo // Version returns the metadata version. Version() MetadataVer // GetTLFPublicKey returns the TLF public key for the given key generation. // Note the *TLFWriterKeyBundleV3 is expected to be nil for pre-v3 metadata. GetTLFPublicKey(KeyGen, ExtraMetadata) (kbfscrypto.TLFPublicKey, bool) // AreKeyGenerationsEqual returns true if all key generations in the passed metadata are equal to those // in this revision. AreKeyGenerationsEqual(kbfscodec.Codec, BareRootMetadata) (bool, error) // GetUnresolvedParticipants returns any unresolved readers and writers present in this revision of metadata. GetUnresolvedParticipants() (readers, writers []keybase1.SocialAssertion) // GetTLFWriterKeyBundleID returns the ID of the externally-stored writer key bundle, or the zero value if // this object stores it internally. GetTLFWriterKeyBundleID() TLFWriterKeyBundleID // GetTLFReaderKeyBundleID returns the ID of the externally-stored reader key bundle, or the zero value if // this object stores it internally. GetTLFReaderKeyBundleID() TLFReaderKeyBundleID // StoresHistoricTLFCryptKeys returns whether or not history keys are symmetrically encrypted; if not, they're // encrypted per-device. StoresHistoricTLFCryptKeys() bool // GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given // generation using the current generation's TLFCryptKey. GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen, currentKey kbfscrypto.TLFCryptKey, extra ExtraMetadata) ( kbfscrypto.TLFCryptKey, error) } // MutableBareRootMetadata is a mutable interface to the bare serializable MD that is signed by the reader or writer. type MutableBareRootMetadata interface { BareRootMetadata // SetRefBytes sets the number of newly referenced bytes introduced by this revision of metadata. SetRefBytes(refBytes uint64) // SetUnrefBytes sets the number of newly unreferenced bytes introduced by this revision of metadata. SetUnrefBytes(unrefBytes uint64) // SetDiskUsage sets the estimated disk usage for the folder as of this revision of metadata. SetDiskUsage(diskUsage uint64) // AddRefBytes increments the number of newly referenced bytes introduced by this revision of metadata. 
AddRefBytes(refBytes uint64) // AddUnrefBytes increments the number of newly unreferenced bytes introduced by this revision of metadata. AddUnrefBytes(unrefBytes uint64) // AddDiskUsage increments the estimated disk usage for the folder as of this revision of metadata. AddDiskUsage(diskUsage uint64) // ClearRekeyBit unsets any set rekey bit. ClearRekeyBit() // ClearWriterMetadataCopiedBit unsets any set writer metadata copied bit. ClearWriterMetadataCopiedBit() // ClearFinalBit unsets any final bit. ClearFinalBit() // SetUnmerged sets the unmerged bit. SetUnmerged() // SetBranchID sets the branch ID for this metadata revision. SetBranchID(bid BranchID) // SetPrevRoot sets the hash of the previous metadata revision. SetPrevRoot(mdID MdID) // SetSerializedPrivateMetadata sets the serialized private metadata. SetSerializedPrivateMetadata(spmd []byte) // SetWriterMetadataSigInfo sets the signature info associated with the writer metadata. SetWriterMetadataSigInfo(sigInfo kbfscrypto.SignatureInfo) // SetLastModifyingWriter sets the UID of the last user to modify the writer metadata. SetLastModifyingWriter(user keybase1.UID) // SetLastModifyingUser sets the UID of the last user to modify any of the metadata. SetLastModifyingUser(user keybase1.UID) // SetRekeyBit sets the rekey bit. SetRekeyBit() // SetFinalBit sets the finalized bit. SetFinalBit() // SetWriterMetadataCopiedBit sets the writer metadata copied bit. SetWriterMetadataCopiedBit() // SetRevision sets the revision number of the underlying metadata. SetRevision(revision MetadataRevision) // AddNewKeysForTesting adds new writer and reader TLF key bundles to this revision of metadata. // Note: This is only used for testing at the moment. AddNewKeysForTesting(crypto cryptoPure, wDkim, rDkim UserDeviceKeyInfoMap) (ExtraMetadata, error) // NewKeyGeneration adds a new key generation to this revision of metadata. NewKeyGeneration(pubKey kbfscrypto.TLFPublicKey) (extra ExtraMetadata) // SetUnresolvedReaders sets the list of unresolved readers associated with this folder. SetUnresolvedReaders(readers []keybase1.SocialAssertion) // SetUnresolvedWriters sets the list of unresolved writers associated with this folder. SetUnresolvedWriters(writers []keybase1.SocialAssertion) // SetConflictInfo sets any conflict info associated with this metadata revision. SetConflictInfo(ci *TlfHandleExtension) // SetFinalizedInfo sets any finalized info associated with this metadata revision. SetFinalizedInfo(fi *TlfHandleExtension) // SetWriters sets the list of writers associated with this folder. SetWriters(writers []keybase1.UID) // SetTlfID sets the ID of the underlying folder in the metadata structure. SetTlfID(tlf TlfID) // FakeInitialRekey fakes the initial rekey for the given // BareRootMetadata. This is necessary since newly-created // BareRootMetadata objects don't have enough data to build a // TlfHandle from until the first rekey. FakeInitialRekey(c cryptoPure, h BareTlfHandle) (ExtraMetadata, error) // Update initializes the given freshly-created BareRootMetadata object with // the given TlfID and BareTlfHandle. Note that if the given ID/handle are private, // rekeying must be done separately. Update(tlf TlfID, h BareTlfHandle) error // GetTLFKeyBundles returns the TLF key bundles for this metadata at the given key generation. // MDv3 TODO: Get rid of this. GetTLFKeyBundles(keyGen KeyGen) (*TLFWriterKeyBundleV2, *TLFReaderKeyBundleV2, error) // GetUserDeviceKeyInfoMaps returns the user device key info maps for the given // key generation. 
GetUserDeviceKeyInfoMaps(keyGen KeyGen, extra ExtraMetadata) ( readers, writers UserDeviceKeyInfoMap, err error) // FinalizeRekey is called after all rekeying work has been performed on the underlying // metadata. FinalizeRekey(c cryptoPure, prevKey, key kbfscrypto.TLFCryptKey, extra ExtraMetadata) error } // KeyBundleCache is an interface to a key bundle cache for use with v3 metadata. type KeyBundleCache interface { // GetTLFReaderKeyBundle returns the TLFReaderKeyBundleV3 for the given TLFReaderKeyBundleID. GetTLFReaderKeyBundle(TLFReaderKeyBundleID) (TLFReaderKeyBundleV3, bool) // GetTLFWriterKeyBundle returns the TLFWriterKeyBundleV3 for the given TLFWriterKeyBundleID. GetTLFWriterKeyBundle(TLFWriterKeyBundleID) (TLFWriterKeyBundleV3, bool) // PutTLFReaderKeyBundle stores the given TLFReaderKeyBundleV3. PutTLFReaderKeyBundle(TLFReaderKeyBundleV3) // PutTLFWriterKeyBundle stores the given TLFWriterKeyBundleV3. PutTLFWriterKeyBundle(TLFWriterKeyBundleV3) }
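The Put contract in the BlockCache interface above allows a transient and a permanent entry to coexist for the same block while it is in flight to the server. Below is a minimal sketch of that dual-lifetime bookkeeping, using simplified stand-in types rather than the real KBFS ones; the permanent-first lookup preference is a choice made for the sketch, not something this excerpt specifies.

package main

import "fmt"

type lifetime int

const (
	transientEntry lifetime = iota // may be evicted at any time
	permanentEntry                 // must be removed explicitly
)

// toyBlockCache keeps the two lifetimes in separate maps, so putting a
// block with both lifetimes yields two independent entries.
type toyBlockCache struct {
	transient map[string][]byte
	permanent map[string][]byte
}

func newToyBlockCache() *toyBlockCache {
	return &toyBlockCache{
		transient: make(map[string][]byte),
		permanent: make(map[string][]byte),
	}
}

func (c *toyBlockCache) put(id string, data []byte, lt lifetime) {
	if lt == transientEntry {
		c.transient[id] = data
		return
	}
	c.permanent[id] = data
}

// get prefers the permanent entry, since it is the one guaranteed to
// survive eviction while the block is still being sent to the server.
func (c *toyBlockCache) get(id string) ([]byte, bool) {
	if d, ok := c.permanent[id]; ok {
		return d, true
	}
	d, ok := c.transient[id]
	return d, ok
}

func main() {
	c := newToyBlockCache()
	c.put("blk", []byte("payload"), transientEntry)
	c.put("blk", []byte("payload"), permanentEntry) // both entries coexist
	d, ok := c.get("blk")
	fmt.Println(ok, string(d)) // true payload
}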
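MaskTLFCryptKey and UnmaskTLFCryptKey in the cryptoPure interface above split a TLF key into a server half and a client half. Assuming an XOR-based split (an assumption; the actual construction lives in kbfscrypto, which is not part of this excerpt), the round trip looks like the following, with plain byte arrays standing in for the kbfscrypto key types.

package main

import "fmt"

const keyLen = 32

// mask derives the client half: clientHalf = serverHalf XOR key.
func mask(serverHalf, key [keyLen]byte) [keyLen]byte {
	var clientHalf [keyLen]byte
	for i := range key {
		clientHalf[i] = serverHalf[i] ^ key[i]
	}
	return clientHalf
}

// unmask recombines the halves to recover the full key.
func unmask(serverHalf, clientHalf [keyLen]byte) [keyLen]byte {
	var key [keyLen]byte
	for i := range clientHalf {
		key[i] = serverHalf[i] ^ clientHalf[i]
	}
	return key
}

func main() {
	var serverHalf, key [keyLen]byte
	serverHalf[0], key[0] = 0xAA, 0x55
	clientHalf := mask(serverHalf, key)
	fmt.Println(unmask(serverHalf, clientHalf) == key) // true
}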
1
13738
Could combine this with `encryptionKeyGetter` to have a single `keyGetter` interface. I'm not sure which way is better.
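Since Go interfaces compose by embedding, the combination the review comment suggests could look like the sketch below. The method names and signatures are illustrative stand-ins that assume the surrounding package's types (context.Context, KeyMetadata, BlockPointer, kbfscrypto); the actual encryptionKeyGetter definition is not shown in this excerpt.

type encryptionKeyGetter interface {
	GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)
}

type decryptionKeyGetter interface {
	GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata,
		blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error)
}

// keyGetter unifies the two; the trade-off the comment alludes to is that
// every implementer (and mock) must then satisfy both method sets, even
// where only one half is needed.
type keyGetter interface {
	encryptionKeyGetter
	decryptionKeyGetter
}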
keybase-kbfs
go
@@ -6316,7 +6316,8 @@ Lng32 SQL_EXEC_DeleteHbaseJNI() threadContext->incrNumOfCliCalls(); HBaseClient_JNI::deleteInstance(); - HiveClient_JNI::deleteInstance(); + // The Hive client persists across connections + // HiveClient_JNI::deleteInstance(); } catch(...) {
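The patch above stops tearing down the Hive client singleton during CLI cleanup so that it persists across connections. A toy Go illustration of that asymmetry follows; the names are stand-ins for this sketch, not Trafodion APIs.

package main

import "fmt"

type jniClient struct{ name string }

var (
	hbaseClient = &jniClient{name: "hbase"}
	hiveClient  = &jniClient{name: "hive"}
)

// deleteSessionInstances mirrors the patched SQL_EXEC_DeleteHbaseJNI:
// the HBase client is dropped on session cleanup, while the Hive client
// is deliberately retained for reuse by later connections.
func deleteSessionInstances() {
	hbaseClient = nil
	// hiveClient persists across connections, so it is not deleted here.
}

func main() {
	deleteSessionInstances()
	fmt.Println(hbaseClient == nil, hiveClient.name) // true hive
}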
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ****************************************************************************** * * File: CliExtern.cpp * Description: Separation of Cli.cpp into a stub and client routine. * Originally done to help with NT work on single-threading * access to the CLI routines. Will help to segregate * other work like DLL/Library work etc. * * Created: 7/19/97 * Language: C and C++ * * * ****************************************************************************** */ #include "Platform.h" #include "wstr.h" #include "cli_stdh.h" #include "ExpError.h" #include "NLSConversion.h" #include "Cli.h" #include "ComRtUtils.h" #include "CliSemaphore.h" #include "fs/feerrors.h" #include <time.h> #include "cextdecs/cextdecs.h" #include "logmxevent.h" //#include "NAString.h" #include "guardian/kphandlz.h" #include "guardian/ddctctlz.h" #ifndef pdctctlz_h_dct_get_by_name_ #define pdctctlz_h_dct_get_by_name_ // so that we only get dct_get_by_name #endif #ifndef pdctctlz_h_including_section #define pdctctlz_h_including_section // from pdctctlz.h #endif #ifndef pdctctlz_h_ #define pdctctlz_h_ #endif #include "guardian/pdctctlz.h" //#include "ComRegAPI.h" #include "memorymonitor.h" #include "dfs2rec.h" #include "ExMeas.h" #include "Statement.h" #include "ComSqlId.h" CLISemaphore globalSemaphore ; #include "seabed/ms.h" #include "seabed/fs.h" #include "seabed/fserr.h" #include "seabed/thread.h" #include "seabed/sqstatehi.h" #include "SqlStats.h" #include "ComExeTrace.h" #include "Context.h" #include "HBaseClient_JNI.h" #ifndef CLI_PRIV_SRL #pragma warning (disable : 4273) //warning elimination extern char ** environ; #pragma warning (default : 4273) //warning elimination #endif #ifndef CLI_PRIV_SRL // this is set to true after the first CLI call. // On the first cli call, method CliNonPrivPrologue is called. // Only used for the top level CLI calls and not if cli is called // from within priv srl. 
NABoolean __CLI_NONPRIV_INIT__ = FALSE; #endif #ifndef CLI_PRIV_SRL #define CLI_NONPRIV_PROLOGUE(rc) \ if (NOT __CLI_NONPRIV_INIT__) \ { \ rc = CliNonPrivPrologue(); \ if (rc) \ return rc; \ __CLI_NONPRIV_INIT__ = TRUE; \ } #define CLI_NONPRIV_PROLOGUE_SHORT(rc) \ if (NOT __CLI_NONPRIV_INIT__) \ { \ rc = (short) CliNonPrivPrologue(); \ if (rc) \ return rc; \ __CLI_NONPRIV_INIT__ = TRUE; \ } #endif #ifndef CLI_PRIV_SRL #if 0 // GetConditionCount // // Find the count of conditions (SQLDIAG_NUMBER) related to // the current statement Int32 GetConditionCount(void) { Int32 cond_count; SQLMODULE_ID module; Lng32 stmt_items[1]; module.module_name = 0; module.module_name_len = 0; SQLDESC_ID cond_desc = { 1, desc_name, &module, "dummy_input_name", 0, 0, 10 }; SQL_EXEC_AllocDesc(&cond_desc,1); SQL_EXEC_SetDescItem(&cond_desc, 1, SQLDESC_TYPE_FS, REC_BIN32_SIGNED, 0); SQL_EXEC_SetDescItem(&cond_desc, 1, SQLDESC_VAR_PTR, (Long) &cond_count, 0); stmt_items[0] = SQLDIAG_NUMBER; SQL_EXEC_GetDiagnosticsStmtInfo(stmt_items, &cond_desc); SQL_EXEC_DeallocDesc(&cond_desc); return (cond_count); } #endif // Check if the current system default experience level and the error experience levels are // compatible. Compatible here means that the error experience level is higher or equal to // the system default experience level. For instance, the ADVANCED experience level is higher // than the BEGINNER experience level. // SQSTATE segment start here Int32 muse(NAHeap *heap, size_t minTotalSize, char* repBuffer, size_t maxRspSize, size_t *rspSize, bool *bufferFull); extern CliGlobals *cli_globals; enum { MAX_IC_ARGS = 20 }; enum { MAX_RSP = 1024 * 1024 }; // 1MB // module name #define MYMODULE exestate #define MYMODULESTR "exestate" // ic externs //SQSTATE_IC_EXTERN(MYMODULE,t1); // pi externs //SQSTATE_PI_EXTERN(MYMODULE,t1); // // ic entry point for IPC info // SQSTATE_IC_EP(MYMODULE,ipc,sre) { int arg; int argc; char *argv[MAX_IC_ARGS]; char rsp[MAX_RSP]; int rsp_len=0; char *myProgName; char buffer[80]; sprintf(buffer, "CliGlobals %p", cli_globals); if (sqstateic_get_ic_args(sre, &argc, &argv[0], MAX_IC_ARGS, rsp, MAX_RSP, &rsp_len)) { if (NULL==cli_globals) { rsp_len += sprintf(&rsp[rsp_len], "Cli not initialized, no ipc information available\n\n"); } else if (NULL==cli_globals->exCollHeap()) { rsp_len += sprintf(&rsp[rsp_len], "No Executor Heap, no ipc information available\n\n"); } else if (NULL==cli_globals->getEnvironment()) { rsp_len += sprintf(&rsp[rsp_len], "IPC Environment not initialized, no ipc information available\n\n"); } else { NAHeap *executorHeap = (NAHeap *)cli_globals->exCollHeap(); IpcEnvironment *env = cli_globals->getEnvironment(); IpcAllConnections *allc = env->getAllConnections(); IpcSetOfConnections pendingIOs = allc->getPendingIOs(); CollIndex allEntries = env->getAllConnections()->entries(); CollIndex pendingIOEntries = env->getAllConnections()->getNumPendingIOs(); bool infoAllConnections = false; char allConnectionsArgVal[] = "allconnections"; if (argc > 0 && strlen(argv[0]) <= strlen(allConnectionsArgVal) && (memcmp(argv[0], allConnectionsArgVal, strlen(argv[0])) == 0)) infoAllConnections = true; if (strlen(cli_globals->myProgName()) == 0) cli_globals->initMyProgName(); myProgName = cli_globals->myProgName(); rsp_len += sprintf(&rsp[rsp_len], "in %s/ic-%s/%s\n", myProgName, MYMODULESTR, "ipc"); rsp[rsp_len++] = '\n'; rsp_len += sprintf(&rsp[rsp_len], "IpcEnvironment: allConnections_ %d, pendingIOs_ %d\n", allEntries, pendingIOEntries); if (infoAllConnections) 
allc->infoAllConnections(rsp, MAX_RSP, &rsp_len); else pendingIOs.infoPendingConnections(rsp, MAX_RSP, &rsp_len); } sqstateic_reply(sre, rsp, rsp_len); } } // // pi entry point for IPC info // SQSTATE_PI_EP(MYMODULE,ipc,node,proc,info,lib) { char rsp[MAX_RSP]; int rsp_len; sqstatepi_printf("%s/%s: pi\n", MYMODULESTR, "ipc"); if ((MS_Mon_Node_Info_Entry_Type *)NULL == node) { sqstatepi_printf(" Invalid node\n"); return; } if ((MS_Mon_Process_Info_Type *)NULL == proc) { sqstatepi_printf(" Invalid proc\n"); return; } sqstatepi_printf(" node-info. nname=%s, nid=%d\n", node->node_name, node->nid); sqstatepi_printf(" proc-info. pname=%s, p-id=%d/%d\n", proc->process_name, proc->nid, proc->pid); sqstatepi_printf(" plugin-info. verbose=%d, verbosev=%d\n", info->verbose, info->verbosev); if (sqstatepi_send_ic_ok(MYMODULESTR, // module "ipc", // call node, // node proc, // proc info, // info lib, // library rsp, // rsp MAX_RSP, // rsp-len &rsp_len)) // rsp-len sqstatepi_print_ic_reply(rsp, rsp_len); } // // ic entry point for muse // SQSTATE_IC_EP(MYMODULE,muse,sre) { int arg; int argc; char *argv[MAX_IC_ARGS]; char rsp[MAX_RSP]; int rsp_len = 0; size_t muse_len; size_t size = 64; char *myProgName; bool bufferFull; if (sqstateic_get_ic_args(sre, &argc, &argv[0], MAX_IC_ARGS, rsp, MAX_RSP, &rsp_len)) { if (NULL==cli_globals) { rsp_len += sprintf(&rsp[rsp_len], "Cli not initialized. No memory information available\n\n"); } else if (NULL==cli_globals->exCollHeap()) { rsp_len += sprintf(&rsp[rsp_len], "Executor heap not initialized, No memory information available\n\n"); } else { NAHeap *executorHeap = (NAHeap *)cli_globals->exCollHeap(); if (argc > 0) size = atoi(argv[0]); if (strlen(cli_globals->myProgName()) == 0) cli_globals->initMyProgName(); myProgName = cli_globals->myProgName(); rsp_len += sprintf(&rsp[rsp_len], "in %s/ic-%s/%s\n", myProgName, MYMODULESTR, "muse"); rsp[rsp_len++] = '\n'; muse(executorHeap, size * 1024, &rsp[rsp_len], (size_t)(MAX_RSP - rsp_len), &muse_len, &bufferFull); rsp_len += muse_len; if ((!bufferFull) && (NULL != cli_globals->getIpcHeap())) { muse(cli_globals->getIpcHeap(), size * 1024, &rsp[rsp_len], (size_t)(MAX_RSP - rsp_len), &muse_len, &bufferFull); rsp_len += muse_len; } if ((!bufferFull) && (NULL != cli_globals->getStatsHeap() )) { muse(cli_globals->getStatsHeap()->getParent(), size * 1024, &rsp[rsp_len], (size_t)(MAX_RSP - rsp_len), &muse_len, &bufferFull); rsp_len += muse_len; } } sqstateic_reply(sre, rsp, rsp_len); } } // // pi entry point for muse // SQSTATE_PI_EP(MYMODULE,muse,node,proc,info,lib) { char rsp[MAX_RSP]; int rsp_len; sqstatepi_printf("%s/%s: pi\n", MYMODULESTR, "muse"); if ((MS_Mon_Node_Info_Entry_Type *)NULL == node) { sqstatepi_printf(" Invalid node\n"); return; } if ((MS_Mon_Process_Info_Type *)NULL == proc) { sqstatepi_printf(" Invalid proc\n"); return; } sqstatepi_printf(" node-info. nname=%s, nid=%d\n", node->node_name, node->nid); sqstatepi_printf(" proc-info. pname=%s, p-id=%d/%d\n", proc->process_name, proc->nid, proc->pid); sqstatepi_printf(" plugin-info. 
verbose=%d, verbosev=%d\n", info->verbose, info->verbosev); if (sqstatepi_send_ic_ok(MYMODULESTR, // module "muse", // call node, // node proc, // proc info, // info lib, // library rsp, // rsp MAX_RSP, // rsp-len &rsp_len)) // rsp-len sqstatepi_print_ic_reply(rsp, rsp_len); } const char * exetrace="exetrace"; // // usage info for executor tracing // Int32 trPrintUsage(char usageStr[], Int32 len) { len += sprintf(&usageStr[len], "sqstate <sqstate args> exetrace-exetrace [[-icarg <TraceOpt>] ... ]\n"); len += sprintf(&usageStr[len], // " <TraceOpt> := progname=<value> | ofilename=<value> | <listOpt>\n"); " <TraceOpt> := progname=<value> | <listOpt>\n"); len += sprintf(&usageStr[len], " <value> := tdm_arkesp | tdm_arkcmp | mxosrvr | <file_name>\n"); len += sprintf(&usageStr[len], // " <listOpt> := listall | listone=<trace_id> | listinfoall | suspend | help\n"); " <listOpt> := listinfoall | listone=<trace_id> | suspend | help\n"); len += sprintf(&usageStr[len], "\n Note:\n"); //len += sprintf(&usageStr[len], // " * use ofilename option to specify output file name\n"); len += sprintf(&usageStr[len], " * use listinfoall option to list currently registered traces\n"); len += sprintf(&usageStr[len], " * use listone option to list trace data for a trace id\n"); //len += sprintf(&usageStr[len], // " * use listall option to dump all date from registered traces\n"); len += sprintf(&usageStr[len], " * use suspend option to halt all threads on subject process(es)\n"); len += sprintf(&usageStr[len], " while retreiving trace data\n"); return len; } // // ic entry point for esp tracing // to set break point in debug, use sqstate_ic_exestate_exetrace as the name // SQSTATE_IC_EP(MYMODULE,exetrace,sre) { Int32 arg; Int32 argc; char *argv[MAX_IC_ARGS]; char rsp[MAX_RSP]; Int32 rsp_len; bool showMine = true; bool showAllTraces = false; bool showOneTrace = false; bool showTraceInfoAll = false; bool toSuspend = false; void *traceId = 0; if (sqstateic_get_ic_args(sre, &argc, argv, MAX_IC_ARGS, rsp, MAX_RSP, &rsp_len)) { CliGlobals *g = cli_globals; if (g) { if (strlen(g->myProgName()) == 0) g->initMyProgName(); if (argc > 0) { // rsp_len += sprintf(&rsp[rsp_len], "ic-args:\n"); for (arg = 0; arg < argc; arg++) { if (strncmp(argv[arg], "progname", 8) == 0) { char *pname = strchr(argv[arg], '='); if (strncmp((++pname),g->myProgName(),strlen(pname))) showMine = false; } else if (strncmp(argv[arg], "ofilename", 9) == 0) { // to be coded } else if (strncmp(argv[arg], "listall", 7) == 0) { showAllTraces = true; // more to be coded } else if (strncmp(argv[arg], "listone", 7) == 0) { showOneTrace = true; char *p = strchr(argv[arg], '='); while (p && *p == ' ') p++; if (!p) { // bad input rsp_len += sprintf(&rsp[rsp_len], "Invalid: arg[%d]='%s'\n", arg, argv[arg]); rsp_len = trPrintUsage(rsp, rsp_len); } else { sscanf(p, "=%p", &traceId); } } else if (strncmp(argv[arg], "listinfoall", 11) == 0) { showTraceInfoAll = true; } else if (strncmp(argv[arg], "suspend", 7) == 0) { toSuspend = true; } else if (strncmp(argv[arg], "help", 4) == 0) { // print usage rsp_len = trPrintUsage(rsp, rsp_len); showMine = false; break; } else { // invalid option rsp_len += sprintf(&rsp[rsp_len], "Invalid: arg[%d]='%s'\n", arg, argv[arg]); rsp_len = trPrintUsage(rsp, rsp_len); showMine = false; break; } } } if (showMine) { if (toSuspend) thread_suspend_all(); rsp_len += sprintf(&rsp[rsp_len], "From process %s (%s pin %.8d):\n", g->myProcessNameString(), g->myProgName(), g->myPin()); rsp_len += sprintf(&rsp[rsp_len], "progName=%s\n", 
g->myProgName()); if (showTraceInfoAll) { if (g->getExeTraceInfo()) { Int32 len = 0; ExeTraceInfo *ti = g->getExeTraceInfo(); Int32 ret = ti->getExeTraceInfoAll(&rsp[rsp_len], MAX_RSP - rsp_len, &len); rsp_len += len; if (ret == 0) { // no more trace info rsp_len += sprintf(&rsp[rsp_len], "No more Executor Trace Info.\n"); } else if (len == 0) { // not enough space rsp_len -= 80; // back a bit in order to print this: rsp_len += sprintf(&rsp[rsp_len], "\n\tBuffer not big enough!! Information truncated.\n"); } } else { rsp_len += sprintf(&rsp[rsp_len], "Executor Trace Info unavailable.\n"); } } if (showOneTrace) { if (g->getExeTraceInfo()) { Int32 len = 0; ExeTraceInfo *ti = g->getExeTraceInfo(); Int32 ret = ti->getExeTraceById(traceId, &rsp[rsp_len], MAX_RSP - rsp_len, &len); rsp_len += len; if (ret < 0) { // no trace for this id rsp_len += sprintf(&rsp[rsp_len], "No Executor Trace for id=%p.\n", traceId); } else if (ret > 0) { // not enough space // back some space to print the warning message... (80 bytes) rsp_len = (MAX_RSP - rsp_len > 80)? rsp_len: MAX_RSP - 80; rsp_len += sprintf(&rsp[rsp_len], "\n\tBuffer not big enough for trace data!!\n"); } } else { rsp_len += sprintf(&rsp[rsp_len], "Executor Trace Info unavailable.\n"); } showOneTrace = false; // reset } rsp_len += sprintf(&rsp[rsp_len], "Done\n"); if (toSuspend) thread_resume_suspended(); } } else { rsp_len += sprintf(&rsp[rsp_len], "Not yet initialized to get exe trace info.\n"); } } sqstateic_reply(sre, rsp, rsp_len); } // // pi entry point // SQSTATE_PI_EP(MYMODULE,exetrace,node,proc,info,lib) { char rsp[MAX_RSP]; Int32 rsp_len; sqstatepi_printf("%s/%s: pi\n", MYMODULESTR, "exetrace"); if ((MS_Mon_Node_Info_Entry_Type *)NULL == node) { sqstatepi_printf(" Invalid node\n"); return; } if ((MS_Mon_Process_Info_Type *)NULL == proc) { sqstatepi_printf(" Invalid proc\n"); return; } if (sqstatepi_send_ic_ok(MYMODULESTR, // module exetrace, // call node, // node proc, // proc info, // info lib, // lib rsp, // rsp MAX_RSP, // rsp-len &rsp_len)) // rsp-len sqstatepi_print_ic_reply(rsp, rsp_len); } // SQSTATE segment end NABoolean doGenerateAnEMSEvent (char *localEMSExperienceLevel, const Int32 experienceLevelLen, SQLMXLoggingArea::ExperienceLevel emsEventEL) { if (getenv("TEST_ERROR_EVENT")) return TRUE; // generate an error event for any error for testing if (!((emsEventEL != SQLMXLoggingArea::eBeginnerEL) && (!str_cmp(localEMSExperienceLevel, "BEGINNER", experienceLevelLen)))) { return TRUE; // go ahead and generate an EMS event } return FALSE; } // Generate an EMS event when the condition's diagnostics information is retrieved. // In the case where the same diag condition is queried multiple times, only one EMS event // is generated for this condition. This is ensured by the getEMSEventVisits() // method of the ComCondition class. The repeated querying of the same condition may originate // internally or externally: internally from components such as the catalog // manager or MXCI, and externally from a user application. // // We do not generate events for errors with the following SQLCodes // 0: normal returns - no error or warning // 100: EOF // 8822: the "The statement was not prepared" error code. Ignore this error since // it is not descriptive and more information should be available with an // accompanying condition. // 20109: MXCI return code that encapsulates a utility error.
The utility error should // be handled by the utility layer so ignore it here // EXE_RTS_QID_NOT_FOUND: Filter out any retcode occurring from any CLI call // that can be ignored from logging into EMS void logAnMXEventForError( ComCondition & condition, SQLMXLoggingArea::ExperienceLevel emsEventEL) { // all the lengths are defined in logmxevent.h char buf [ EMS_BUFFER_SIZE+1 ] = "\0"; char localEMSSeverity[ EMS_SEVERITY_LEN+1 ] = "\0"; char localEMSEventTarget[ EMS_EVENT_TARGET_LEN+1 ] = "\0"; char localEMSExperienceLevel[ EMS_EXPERIENCE_LEVEL_LEN+1 ] = "\0"; NABoolean forceDialout = FALSE; NABoolean isWarning = FALSE; Int64 transid = -1; if (condition.getEMSEventVisits() >= 1) return; Lng32 sqlCode = condition.getSQLCODE(); if ( (sqlCode == 0) || (sqlCode == 100) || (sqlCode == -EXE_RTS_QID_NOT_FOUND) || (sqlCode == -8822) || (sqlCode == -20109) ) return; if (sqlCode == -8551) { Lng32 nskCode = condition.getNskCode(); if ( (nskCode == 40) || (nskCode == 35) ) forceDialout = TRUE; // If the caller is UNC then suppress error 73 if (GetCliGlobals()->isUncProcess()) { if (sqlCode == -8551) if (nskCode == 73) return; } } // event-ids for the events to be generated are the absolute values for the // errors' sqlcodes Lng32 sqlcodeToEventId = sqlCode; if (sqlCode > 0) isWarning = TRUE; if ( sqlCode < 0 ) sqlcodeToEventId = sqlCode * (-1); transid = GetCliGlobals()->currContext()->getTransaction()->getTransid(); // the EMS event attributes are always in UTF8 UnicodeStringToLocale( CharInfo::UTF8, (NAWchar *) (condition.getMessageText(TRUE, // NABoolean prefixAdded CharInfo::UTF8 ) ), // msg in UCS2 condition.getMessageLength(), buf, EMS_BUFFER_SIZE, TRUE /* addNullAtEnd */, TRUE /*allowInvalidCodePoint */); // retrieve the EMS event attributes for error with sqlcode sqlCode ComEMSSeverity( sqlCode, localEMSSeverity ); // Note: This function will downgrade any warnings which have // an EventTarget as DIALOUT to LOGONLY // This function will also force a dialout if the flag is TRUE ComEMSEventTarget( sqlCode, localEMSEventTarget, forceDialout ); ComEMSExperienceLevel( sqlCode, localEMSExperienceLevel ); // generate an event if the user experience level of the error is compatible with the // current system default user experience level. Compatible here means that the error // experience level is higher or equal to the system default experience level. // For instance, the ADVANCED experience level is higher than the BEGINNER experience level.
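//
// For readability, the double-negated test inside doGenerateAnEMSEvent()
// (defined above) can be read in its De Morgan-expanded form. The sketch
// below is illustrative only and is not compiled; the helper name is
// hypothetical.
#if 0
NABoolean doGenerateAnEMSEvent_expanded(char *condEL, const Int32 len,
                                        SQLMXLoggingArea::ExperienceLevel sysEL)
{
  if (getenv("TEST_ERROR_EVENT"))
    return TRUE;                                   // test hook: always log
  NABoolean sysAboveBeginner = (sysEL != SQLMXLoggingArea::eBeginnerEL);
  NABoolean condIsBeginner = (str_cmp(condEL, "BEGINNER", len) == 0);
  // Suppress the event only when the system default level is above
  // BEGINNER and the condition itself is tagged BEGINNER.
  if (sysAboveBeginner && condIsBeginner)
    return FALSE;
  return TRUE;
}
#endif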
if (doGenerateAnEMSEvent( localEMSExperienceLevel, EMS_EXPERIENCE_LEVEL_LEN, emsEventEL )) { // convert String params to locale char lstring0 [ EMS_BUFFER_SIZE+1 ] = "\0" ; char lstring1 [ EMS_BUFFER_SIZE+1 ] = "\0" ; char lstring2 [ EMS_BUFFER_SIZE+1 ] = "\0" ; char lstring3 [ EMS_BUFFER_SIZE+1 ] = "\0" ; char lstring4 [ EMS_BUFFER_SIZE+1 ] = "\0" ; // convert event attributes to UTF8 for (int i=0; i<5; i++) { char *lstringi = NULL; switch (i) { case 0: lstringi = lstring0; break; case 1: lstringi = lstring1; break; case 2: lstringi = lstring2; break; case 3: lstringi = lstring3; break; case 4: lstringi = lstring4; break; } if (condition.hasOptionalString(i)) { if (condition.getOptionalString(i) && condition.getOptionalStringCharSet(i) == CharInfo::UTF8) { strcpy(lstringi,condition.getOptionalString(i)); } else if (condition.getOptionalWString(i)) { assert(condition.getOptionalStringCharSet(i) == CharInfo::UNICODE); UnicodeStringToLocale( CharInfo::UTF8, condition.getOptionalWString(i), na_wcslen(condition.getOptionalWString(i)), lstringi, EMS_BUFFER_SIZE, TRUE /* addNullAtEnd */, TRUE /*allowInvalidCodePoint */); } else { char *dummyFirstUntranslatedChar; LocaleToUTF8( cnv_version1, condition.getOptionalString(i), str_len(condition.getOptionalString(i)), lstringi, EMS_BUFFER_SIZE, convertCharsetEnum(condition.getOptionalStringCharSet(i)), dummyFirstUntranslatedChar, NULL, TRUE); /* addNullAtEnd */ } } } // when logging using log4cxx these are the only tokens used SQLMXLoggingArea::logSQLMXEventForError( sqlcodeToEventId, buf, condition.getSqlID(), isWarning); /* // when logging using seapilot more tokens can be used SQLMXLoggingArea::logSQLMXEventForError( sqlcodeToEventId, localEMSExperienceLevel, localEMSSeverity, localEMSEventTarget, buf , condition.getSqlID(), condition.getOptionalInteger(0), condition.getOptionalInteger(1), condition.getOptionalInteger(2), condition.getOptionalInteger(3), condition.getOptionalInteger(4), lstring0, lstring1, lstring2, lstring3, lstring4, condition.getServerName(), condition.getConnectionName(), condition.getConstraintCatalog(), condition.getConstraintSchema(), condition.getConstraintName(), condition.getTriggerCatalog(), condition.getTriggerSchema(), condition.getTriggerName(), condition.getCatalogName(), condition.getSchemaName(), condition.getTableName(), condition.getColumnName(), transid, condition.getRowNumber(), condition.getNskCode(), isWarning); */ } } #endif // CLI_PRIV_SRL // RecordError // // This wrapper is called after every CLI function to capture the SQLID and // associate it with the conditions as well as RTS Lng32 RecordError(SQLSTMT_ID * currentSqlStmt, Lng32 inRetcode) { if (inRetcode == 0) return inRetcode; #ifndef CLI_PRIV_SRL // Get the SQL ID (aka SQL_ATTR_UNIQUE_STMT_ID) from the statement (SQLSTMT_ID) if (currentSqlStmt != NULL) { SQL_EXEC_SetStmtAttr( currentSqlStmt, SQL_ATTR_COPY_STMT_ID_TO_DIAGS, 0, NULL ); if (inRetcode != -EXE_RTS_QID_NOT_FOUND) { SQL_EXEC_SetErrorCodeInRTS(currentSqlStmt, inRetcode); } } #endif // CLI_PRIV_SRL return(inRetcode); } // DllMain DLL_PROCESS_DETACH routine suspends the memory manager update thread so that an // access violation does not occur after Cheyenne unloads the DLL. 
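//
// The SQL_EXEC_* wrappers that follow all share one shape: run the lazy
// one-time CLI initialization, take the per-context semaphore, bump the
// CLI-call nesting counter, call the SQLCLI_* worker, undo both (including
// on the exception path), and funnel the return code through RecordError().
// A sketch of that pattern, with a hypothetical helper name (illustrative
// only, not compiled):
#if 0
template <class Worker>
Lng32 cliWrapperPattern(SQLSTMT_ID *stmt, Worker doWork)
{
  Lng32 retcode;
  CLISemaphore *sem;
  ContextCli *ctx;
  CLI_NONPRIV_PROLOGUE(retcode);      // create CliGlobals on first use
  try {
    sem = getCliSemaphore(ctx);
    sem->get();                       // serialize access to this context
    ctx->incrNumOfCliCalls();
    retcode = doWork();               // e.g. SQLCLI_CloseStmt(...)
  }
  catch (...) {
    retcode = -CLI_INTERNAL_ERROR;    // fall through to the cleanup below
  }
  ctx->decrNumOfCliCalls();
  sem->release();
  return RecordError(stmt, retcode);  // attach the SQLID to diags / RTS
}
#endif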
#ifdef SQ_CPP_INTF extern short my_mpi_setup (Int32* argc, char** argv[] ); #endif extern "C" { #ifndef SQ_CPP_INTF short my_mpi_setup (Int32* argc, char** argv[] ); #endif }; static short sqInit() { static bool sbInitialized = false; if (!sbInitialized) { sbInitialized = true; Int32 largc = 0; char **largv = 0; char procFileName[128]; FILE *proc_file = 0; char buf[8193]; short p_i = 0; Int32 c = 0; // This memory is never freed. largv = (char **) malloc(100 * sizeof(char *)); sprintf(procFileName, "/proc/%d/cmdline",getpid()); proc_file = fopen(procFileName, "r"); buf[0] = 0; p_i = 0; if (proc_file != NULL) { // the /proc open can fail; harvest argv only if it succeeded while ((c = fgetc(proc_file)) != EOF) { buf[p_i++] = c; if (p_i >= sizeof(buf)) abort(); if (c == 0) { // This memory is never freed. largv[largc] = (char *) malloc ((p_i + 1) * sizeof(char)); strcpy(largv[largc++], buf); p_i = 0; buf[0] = 0; } } fclose(proc_file); } try { short retcode = my_mpi_setup(&largc, &largv); } catch (...) { cerr << "Error while initializing messaging system. Exiting..." << endl; exit(1); } } return 0; } #ifndef CLI_PRIV_SRL static Lng32 CliNonPrivPrologue() { Lng32 retcode; if (cli_globals == NULL || cli_globals->getIsInitialized() == FALSE) { globalSemaphore.get(); if (cli_globals != NULL) { globalSemaphore.release(); return 0; } sqInit(); CliGlobals::createCliGlobals(FALSE); // this call will create the globals. ex_assert(SQL_EXEC_SetEnviron_Internal(1) == 0, "Unable to set the Environment"); globalSemaphore.release(); } return 0; } #endif CLISemaphore *getCliSemaphore(ContextCli *&context) { context = cli_globals->currContext(); if (context != NULL) return context->getSemaphore(); else return cli_globals->getSemaphore(); } //SQ_LINUX #ifdef NA_WINNT #if 0 Lng32 cliWillThrow() { #ifdef _DEBUG // It is used to execute debug code like no-debug (release) code static Lng32 willThrow = -1; if (willThrow == -1) { char *ptr = getenv("SQLMX_CLI_WILL_THROW"); if (ptr) { willThrow = atoi(ptr); if (willThrow != 0) willThrow = 1; } else willThrow = 1; } return willThrow; #else return 1; #endif } #endif SQLCLI_LIB_FUNC Lng32 SQL_EXEC_AddModule(/*IN*/ SQLMODULE_ID * module_name) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_AddModule(GetCliGlobals(), module_name); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } //ss_cc_change // ignore all COBOL functions // COBOL change : For all the wrapper functions that we provide for the COBOL // preprocessor, we assume that if there is a struct that is passed in with // name_mode == -1 , then this means that that struct is just a "dummy". // This is because COBOL applications do not have the ability to pass in a // NULL pointer into the function as a parameter.
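//
// Sketch of the convention just described (illustrative only, not compiled):
// a COBOL caller passes a real struct with name_mode == -1 where a C caller
// would pass NULL, and the upper-case wrapper maps one onto the other.
#if 0
SQLDESC_ID desc, dummy;
dummy.name_mode = -1;              // COBOL-side stand-in for a NULL pointer
// Equivalent calls:
SQL_EXEC_ALLOCDESC(&desc, &dummy); // COBOL entry point; dummy mapped to NULL
SQL_EXEC_AllocDesc(&desc, NULL);   // C entry point
#endif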
//LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ADDMODULE ( /*IN*/ SQLMODULE_ID * module_name) { // See comment above : "COBOL change" if (module_name) { if (module_name->version == -1) return SQL_EXEC_AddModule(0); } return SQL_EXEC_AddModule(module_name); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_AllocDesc(/*INOUT*/ SQLDESC_ID * desc_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_AllocDesc(GetCliGlobals(), desc_id, input_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ALLOCDESC ( /*INOUT*/ SQLDESC_ID * desc_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor) { // See comment above : "COBOL change" if (input_descriptor && (input_descriptor->name_mode == -1)) input_descriptor = NULL; return SQL_EXEC_AllocDesc(desc_id, input_descriptor); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_AllocDescBasic(/*INOUT*/ SQLDESC_ID * desc_id, /*IN OPTIONAL*/ Lng32 max_entries) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_AllocDescInt(GetCliGlobals(), desc_id, max_entries); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_AllocStmt(/*INOUT*/ SQLSTMT_ID * new_statement_id, /*IN OPTIONAL*/ SQLSTMT_ID * cloned_statement) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_AllocStmt(GetCliGlobals(), new_statement_id, cloned_statement); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Int32 SQL_EXEC_GetDiskMaxSize ( /*IN*/ char *volname, /*OUT*/ Int64 *totalCapacity, /*OUT*/ Int64 *totalFreespace) { Lng32 retcode; CLISemaphore *tmpSemaphore; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = GetCliGlobals()->getSemaphore(); tmpSemaphore->get(); retcode = SQLCLI_GetDiskMaxSize(GetCliGlobals(), volname, totalCapacity, totalFreespace ); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { tmpSemaphore->release(); throw; } #endif } tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Int32 SQL_EXEC_GetListOfDisks ( /*IN/OUT*/ char *diskBuffer, /* OUT */ Int32 *numTSEs, /* OUT */ Int32 *maxTSELength, /* IN/OUT */ Int32 *diskBufferLength ) { Lng32 retcode; CLISemaphore *tmpSemaphore; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = GetCliGlobals()->getSemaphore(); tmpSemaphore->get(); retcode = SQLCLI_GetListOfDisks(GetCliGlobals(), diskBuffer,numTSEs,maxTSELength, diskBufferLength ); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { tmpSemaphore->release(); throw; } #endif } tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ALLOCSTMT ( /*INOUT*/ SQLSTMT_ID * new_statement_id, /*IN OPTIONAL*/ SQLSTMT_ID * cloned_statement) { // See comment above : "COBOL change" if (cloned_statement && (cloned_statement->name_mode == -1)) cloned_statement = NULL; return SQL_EXEC_AllocStmt(new_statement_id, cloned_statement); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_AllocStmtForRS(/*IN*/ SQLSTMT_ID *callStmtId, /*IN*/ Lng32 resultSetIndex, /*INOUT*/ SQLSTMT_ID *resultSetStmtId) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_AllocStmtForRS(GetCliGlobals(), callStmtId, resultSetIndex, resultSetStmtId); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ALLOCSTMTFORRS ( /*IN*/ SQLSTMT_ID *callStmtId, /*IN*/ Lng32 resultSetIndex, /*INOUT*/ SQLSTMT_ID *resultSetStmtId) { return SQL_EXEC_AllocStmtForRS(callStmtId, resultSetIndex, resultSetStmtId); } //LCOV_EXCL_STOP //nowait CLI //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_AssocFileNumber(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ short file_number) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_AssocFileNumber(GetCliGlobals(), statement_id, file_number); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ASSOCFILENUMBER (/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ short file_number) { return SQL_EXEC_AssocFileNumber(statement_id, file_number); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ClearDiagnostics(/*IN*/ SQLSTMT_ID *statement_id) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ClearDiagnostics(GetCliGlobals(), statement_id); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CLEARDIAGNOSTICS (/*IN*/ SQLSTMT_ID *statement_id){ return SQL_EXEC_ClearDiagnostics(statement_id); }; #if defined (CLI_LIB) #pragma srlexports #endif SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CLI_VERSION() { return CLI_VERSION; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CloseStmt(/*IN*/ SQLSTMT_ID * statement_id) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_CloseStmt(GetCliGlobals(), statement_id); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CLOSESTMT ( /*IN*/ SQLSTMT_ID * statement_id){ return SQL_EXEC_CloseStmt(statement_id); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CreateContext(/*OUT*/ SQLCTX_HANDLE * context_handle, /*IN*/ char* sqlAuthId, /*IN*/ Lng32 forFutureUse) { Lng32 retcode; CLI_NONPRIV_PROLOGUE(retcode); try { retcode = SQLCLI_CreateContext(GetCliGlobals(), context_handle, sqlAuthId, forFutureUse); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { throw; } #endif } retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CREATECONTEXT(/*OUT*/ SQLCTX_HANDLE * context_handle, /*IN*/ char* sqlAuthId, /*IN*/ Lng32 forFutureUse) { return SQL_EXEC_CreateContext(context_handle, sqlAuthId, forFutureUse); } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CurrentContext(/*OUT*/ SQLCTX_HANDLE * contextHandle) { Lng32 retcode; CLI_NONPRIV_PROLOGUE(retcode); try { retcode = SQLCLI_CurrentContext(GetCliGlobals(), contextHandle); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { throw; } #endif } retcode = RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CURRENTCONTEXT(/*OUT*/ SQLCTX_HANDLE * contextHandle){ return SQL_EXEC_CurrentContext(contextHandle); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DeleteContext(/*IN*/ SQLCTX_HANDLE contextHandle) { Lng32 retcode; CLI_NONPRIV_PROLOGUE(retcode); try { retcode = SQLCLI_DeleteContext(GetCliGlobals(), contextHandle); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { throw; } #endif } retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DELETECONTEXT(/*IN*/ SQLCTX_HANDLE contextHandle){ return SQL_EXEC_DeleteContext(contextHandle); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DropModule(/*IN*/ SQLMODULE_ID * module_name) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_DropModule(GetCliGlobals(), module_name); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ResetContext(/*IN*/ SQLCTX_HANDLE contextHandle, /*IN*/ void *contextMsg) { Lng32 retcode; try { retcode = SQLCLI_ResetContext(GetCliGlobals(), contextHandle, contextMsg); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { throw; } #endif } retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_RESETCONTEXT(/*IN*/ SQLCTX_HANDLE contextHandle, /*IN*/ void *contextMsg){ return SQL_EXEC_ResetContext(contextHandle, contextMsg); }; // new UDR interface, internal use SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetUdrErrorFlags_Internal(/*OUT*/ Lng32 *udrErrorFlags) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetUdrErrorFlags_Internal(GetCliGlobals(), udrErrorFlags); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetUdrAttributes_Internal(/*IN*/ Lng32 sqlAccessMode, /*IN*/ Lng32 forFutureUse) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetUdrAttributes_Internal(GetCliGlobals(), sqlAccessMode, forFutureUse); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ResetUdrErrorFlags_Internal() { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ResetUdrErrorFlags_Internal(GetCliGlobals()); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetUdrRuntimeOptions_Internal(/*IN*/ const char *options, /*IN*/ ULng32 optionsLen, /*IN*/ const char *delimiters, /*IN*/ ULng32 delimsLen) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetUdrRuntimeOptions_Internal(GetCliGlobals(), options, optionsLen, delimiters, delimsLen); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DeallocDesc(/*IN*/ SQLDESC_ID * desc_id) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_DeallocDesc(GetCliGlobals(), desc_id); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DEALLOCDESC ( /*IN*/ SQLDESC_ID * desc_id ){ return SQL_EXEC_DeallocDesc(desc_id); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DeallocStmt(/*IN*/ SQLSTMT_ID * statement_id) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_DeallocStmt(GetCliGlobals(), statement_id); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); // Filter out -8804 since the preprocessor always issues DeallocStmt and then // allocates the statement in case of embedded dynamic sql statements if (retcode == -CLI_STMT_NOT_EXISTS) { if (statement_id->module == NULL || (statement_id->module != NULL && statement_id->module->module_name == NULL)) retcode = RecordError(statement_id, retcode); } else { retcode = RecordError(statement_id, retcode); } return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DEALLOCSTMT ( /*IN*/ SQLSTMT_ID * statement_id){ return SQL_EXEC_DeallocStmt(statement_id); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DefineDesc(/*IN*/ SQLSTMT_ID * statement_id, /* (SQLWHAT_DESC) *IN*/ Lng32 what_descriptor, /*IN*/ SQLDESC_ID * sql_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_DefineDesc(GetCliGlobals(), statement_id, what_descriptor, sql_descriptor); } catch(...)
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DEFINEDESC ( /*IN*/ SQLSTMT_ID * statement_id, /*IN* (SQLWHAT_DESC) */ Lng32 what_descriptor, /*IN*/ SQLDESC_ID * sql_descriptor){ return SQL_EXEC_DefineDesc(statement_id, what_descriptor, sql_descriptor); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DescribeStmt(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_DescribeStmt(GetCliGlobals(), statement_id, input_descriptor, output_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DESCRIBESTMT ( /*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor) { // See comment above : "COBOL change" if (input_descriptor && (input_descriptor->name_mode == -1)) input_descriptor = NULL; if (output_descriptor && (output_descriptor->name_mode == -1)) output_descriptor = NULL; return SQL_EXEC_DescribeStmt(statement_id, input_descriptor, output_descriptor); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DisassocFileNumber(/*IN*/ SQLSTMT_ID * statement_id) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_DisassocFileNumber(GetCliGlobals(), statement_id); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DISASSOCFILENUMBER (/*IN*/ SQLSTMT_ID * statement_id){ return SQL_EXEC_DisassocFileNumber(statement_id); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DropContext(/*IN*/ SQLCTX_HANDLE context_handle) { Lng32 retcode; CLI_NONPRIV_PROLOGUE(retcode); try { retcode = SQLCLI_DropContext(GetCliGlobals(), context_handle); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { throw; } #endif } retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DROPCONTEXT ( /*IN*/ SQLCTX_HANDLE context_handle ){ return SQL_EXEC_DropContext(context_handle); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_Exec(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) 
{ Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_Exec(GetCliGlobals(), statement_id, input_descriptor, num_ptr_pairs, ap, 0); if (retcode < 0) { retcode = SQLCLI_ProcessRetryQuery(GetCliGlobals(), statement_id, retcode, 0, 1, 0, 0); } } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_EXEC ( /*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_Exec(GetCliGlobals(), statement_id, input_descriptor, num_ptr_pairs, 0, ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ExecClose(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_ExecClose(GetCliGlobals(), statement_id, input_descriptor, num_ptr_pairs, ap, 0); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_EXECCLOSE ( /*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ExecClose(GetCliGlobals(), statement_id, input_descriptor, num_ptr_pairs, 0, ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ExecDirect(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) 
{ Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_ExecDirect(GetCliGlobals(), statement_id, sql_source, input_descriptor, num_ptr_pairs, ap, 0); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ExecDirect2(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source, /*IN */ Int32 prep_flags, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_ExecDirect2(GetCliGlobals(), statement_id, sql_source, prep_flags, input_descriptor, num_ptr_pairs, ap, 0); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_EXECDIRECT ( /*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ExecDirect(GetCliGlobals(), statement_id, sql_source, input_descriptor, num_ptr_pairs, 0, ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ExecDirectDealloc(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_ExecDirectDealloc(GetCliGlobals(), statement_id, sql_source, input_descriptor, num_ptr_pairs, ap, 0); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL /* statement_id is now deallocated */, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_EXECDIRECTDEALLOC(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ExecDirectDealloc(GetCliGlobals(), statement_id, sql_source, input_descriptor, num_ptr_pairs, 0, ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL /* statement_id is now deallocated */, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ExecFetch(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_ExecFetch(GetCliGlobals(), statement_id, input_descriptor, num_ptr_pairs, ap, 0); if (retcode < 0) { retcode = SQLCLI_ProcessRetryQuery(GetCliGlobals(), statement_id, retcode, 0, 1, 1, 0); } } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_EXECFETCH(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ExecFetch(GetCliGlobals(), statement_id, input_descriptor, num_ptr_pairs, 0, ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ClearExecFetchClose(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 num_input_ptr_pairs, /*IN*/ Lng32 num_output_ptr_pairs, /*IN*/ Lng32 num_total_ptr_pairs, ...) 
{ Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_total_ptr_pairs); retcode = SQLCLI_ClearExecFetchClose(GetCliGlobals(), statement_id, input_descriptor, output_descriptor, num_input_ptr_pairs, num_output_ptr_pairs, num_total_ptr_pairs, ap, 0, 0); if (retcode < 0) { retcode = SQLCLI_ProcessRetryQuery(GetCliGlobals(), statement_id, retcode, 0, 0, 0, 1); } } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CLEAREXECFETCHCLOSE(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * input_descriptor, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 num_input_ptr_pairs, /*IN*/ Lng32 num_output_ptr_pairs, /*IN*/ Lng32 num_total_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS input_ptr_pairs[], /*IN*/ SQLCLI_PTR_PAIRS output_ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ClearExecFetchClose(GetCliGlobals(), statement_id, input_descriptor, output_descriptor, num_input_ptr_pairs, num_output_ptr_pairs, num_total_ptr_pairs, 0, input_ptr_pairs, output_ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_Fetch(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_Fetch(GetCliGlobals(), statement_id, output_descriptor, num_ptr_pairs, ap, 0); if (retcode < 0) { retcode = SQLCLI_ProcessRetryQuery(GetCliGlobals(), statement_id, retcode, 0, 0, 1, 0); } } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_FETCH(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_Fetch(GetCliGlobals(), statement_id, output_descriptor, num_ptr_pairs, 0, ptr_pairs); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_FetchClose(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 num_ptr_pairs, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_FetchClose(GetCliGlobals(), statement_id, output_descriptor, num_ptr_pairs, ap, 0); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_FETCHCLOSE(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_FetchClose(GetCliGlobals(), statement_id, output_descriptor, num_ptr_pairs, 0, ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_FetchMultiple(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 rowset_size, /*IN*/ Lng32 * rowset_status_ptr, /*OUT*/ Lng32 * rowset_nfetched, /*IN*/ Lng32 num_quadruple_fields, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_quadruple_fields); retcode = SQLCLI_FetchMultiple(GetCliGlobals(), statement_id, output_descriptor, rowset_size, rowset_status_ptr, rowset_nfetched, num_quadruple_fields, ap, 0); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_FETCHMULTIPLE(/*IN*/ SQLSTMT_ID * statement_id, /*IN OPTIONAL*/ SQLDESC_ID * output_descriptor, /*IN*/ Lng32 rowset_size, /*IN*/ Lng32 * rowset_status_ptr, /*OUT*/ Lng32 * rowset_nfetched, /*IN*/ Lng32 num_quadruple_fields, /*IN*/ SQLCLI_QUAD_FIELDS quad_fields[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_FetchMultiple(GetCliGlobals(), statement_id, output_descriptor, rowset_size, rowset_status_ptr, rowset_nfetched, num_quadruple_fields, 0, quad_fields); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_STOP // Called by the cancel thread. // Don't use getCliSemaphore(currContext) to acquire a critical section. // Don't use cliSemaphore to acquire a critical section. // cancelSemaphore is used inside SQLCLI_Cancel. SQLCLI_LIB_FUNC Lng32 SQL_EXEC_Cancel(/*IN OPTIONAL*/ SQLSTMT_ID * statement_id) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { retcode = SQLCLI_Cancel(GetCliGlobals(), statement_id); } catch(...) { #if defined(_THROW_EXCEPTIONS) throw; #else retcode = -CLI_INTERNAL_ERROR; #endif } retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_CANCEL ( /*IN OPTIONAL*/ SQLSTMT_ID * statement_id){ // See comment above : "COBOL change" if (statement_id && (statement_id->name_mode == -1)) statement_id = NULL; return SQL_EXEC_Cancel(statement_id); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDescEntryCount(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ID * output_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDescEntryCount(GetCliGlobals(), sql_descriptor, output_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDESCENTRYCOUNT( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ID * output_descriptor){ return SQL_EXEC_GetDescEntryCount(sql_descriptor, output_descriptor); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDescEntryCountBasic(/*IN*/ SQLDESC_ID * sql_descriptor, /*OUT*/ Lng32 * num_entries) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDescEntryCountInt(GetCliGlobals(), sql_descriptor, num_entries); } catch(...)
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDescItem(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 entry, /* (SQLDESC_ITEM_ID) *IN*/ Lng32 what_to_get, /*OUT OPTIONAL*/ void * numeric_value, /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item, /*IN OPTIONAL*/ Lng32 start_from_offset) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDescItem(GetCliGlobals(), sql_descriptor, entry, what_to_get, numeric_value, string_value, max_string_len, len_of_item, start_from_offset); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDESCITEM( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 entry, /*IN* (SQLDESC_ITEM_ID) */ Lng32 what_to_get, /*OUT OPTIONAL*/ void * numeric_value, /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item, /*IN OPTIONAL*/ Lng32 start_from_offset) { return SQL_EXEC_GetDescItem(sql_descriptor, entry, what_to_get, numeric_value, string_value, max_string_len, len_of_item, start_from_offset); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDescItems(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ITEM desc_items[], /*IN*/ SQLDESC_ID * value_num_descriptor, /*IN*/ SQLDESC_ID * output_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDescItems(GetCliGlobals(), sql_descriptor, desc_items, value_num_descriptor, output_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDESCITEMS( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ITEM desc_items[], /*IN*/ SQLDESC_ID * value_num_descriptor, /*IN*/ SQLDESC_ID * output_descriptor){ return SQL_EXEC_GetDescItems(sql_descriptor, desc_items, value_num_descriptor, output_descriptor); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDescItems2(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 no_of_desc_items, /*IN*/ SQLDESC_ITEM desc_items[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDescItems2(GetCliGlobals(), sql_descriptor, no_of_desc_items, desc_items); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDESCITEMS2( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 no_of_desc_items, /*IN*/ SQLDESC_ITEM desc_items[]){ return SQL_EXEC_GetDescItems2(sql_descriptor, no_of_desc_items, desc_items); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDiagnosticsStmtInfo(/*IN*/ Lng32 * stmt_info_items, /*IN*/ SQLDESC_ID * output_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDiagnosticsStmtInfo(GetCliGlobals(), stmt_info_items, output_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDiagnosticsStmtInfo2( /*IN OPTIONAL*/ SQLSTMT_ID * statement_id, /*IN* (SQLDIAG_STMT_INFO_ITEM_ID) */ Lng32 what_to_get, /*OUT OPTIONAL*/ void * numeric_value, // NA_64BIT /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDiagnosticsStmtInfo2(GetCliGlobals(), statement_id, what_to_get, numeric_value, string_value, max_string_len, len_of_item); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDIAGNOSTICSSTMTINFO( /*IN*/ Lng32 *stmt_info_items, /*IN*/ SQLDESC_ID * output_descriptor){ return SQL_EXEC_GetDiagnosticsStmtInfo(stmt_info_items, output_descriptor); }; #ifndef CLI_PRIV_SRL SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDIAGNOSTICSSTMTINFO2( /*IN OPTIONAL*/ SQLSTMT_ID * statement_id, /*IN* (SQLDIAG_STMT_INFO_ITEM_ID) */ Lng32 what_to_get, /*OUT OPTIONAL*/ void * numeric_value, // NA_64BIT /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item) { return SQL_EXEC_GetDiagnosticsStmtInfo2(statement_id, what_to_get, numeric_value, string_value, max_string_len, len_of_item); }; // same method as in Cli.cpp static void copyResultString(char * dest, const char * src, Lng32 destLen, Lng32 * copyLen = NULL) { assert (dest != NULL); if (copyLen) *copyLen = 0; if (src) { Lng32 len = MINOF(str_len(src)+1, destLen); str_cpy_all(dest, src, len); if (copyLen) *copyLen = len; } else *dest = '\0'; } //LCOV_EXCL_STOP // // GetAutoSizedCondInfo // // Handles all of the details of GetDiagnosticsCondInfo, resizing // and unpacking the diagnostics area. 
(refactored out of
// SQL_EXEC_GetDiagnosticsCondInfo)
//
static Lng32 GetAutoSizedCondInfo(Lng32 * retcode,
                                  SQLDIAG_COND_INFO_ITEM * cond_info_items,
                                  SQLDESC_ID * cond_num_descriptor,
                                  SQLDESC_ID * output_descriptor,
                                  ComDiagsArea * diagsArea,
                                  Lng32 try_count,
                                  DiagsConditionItem ** condition_item_array,
                                  SQLMXLoggingArea::ExperienceLevel * emsEventEL )
{
  NABoolean resize = TRUE;
  const Lng32 try_size = 512;
  Lng32 condition_item_count_needed, condition_item_count = try_count;
  IpcMessageObjSize message_obj_size_needed, message_obj_size = try_size;
  IpcMessageBufferPtr message_buffer_ptr = new char[try_size];
  IpcMessageObjType message_obj_type;
  IpcMessageObjVersion message_obj_version;

  *retcode = 0;

  // The array pointed to by condition_item_array and the buffer pointed to by
  // message_buffer_ptr have been allocated with sizes that should be adequate
  // most of the time. If either is not large enough, SQLCLI_GetDiagnosticsCondInfo
  // will return with the sizes that are needed. When we detect this we will resize,
  // and call again.
  // Can't this be refactored to a simple if-statement, rather than a loop? -KBC
  CLISemaphore *tmpSemaphore;
  ContextCli   *threadContext;
  while (*retcode == 0 && resize)
  {
    try
    {
      tmpSemaphore = getCliSemaphore(threadContext);
      tmpSemaphore->get();
      threadContext->incrNumOfCliCalls();
      *retcode = SQLCLI_GetDiagnosticsCondInfo(GetCliGlobals(),
                                               cond_info_items,
                                               cond_num_descriptor,
                                               output_descriptor,
                                               message_buffer_ptr,
                                               message_obj_size,
                                               &message_obj_size_needed,
                                               &message_obj_type,
                                               &message_obj_version,
                                               condition_item_count,
                                               &condition_item_count_needed,
                                               (*condition_item_array),
                                               emsEventEL);
    }
    catch(...)
    {
      *retcode = -CLI_INTERNAL_ERROR;
#if defined(_THROW_EXCEPTIONS)
      if (cliWillThrow())
      {
        threadContext->decrNumOfCliCalls();
        tmpSemaphore->release();
        throw;
      }
#endif
    }
    threadContext->decrNumOfCliCalls();
    tmpSemaphore->release();

    if (*retcode != 0)
      continue;

    resize = FALSE;
    if (condition_item_count_needed > condition_item_count)
    {
      resize = TRUE;
      delete [] (*condition_item_array);
      condition_item_count = condition_item_count_needed;
      (*condition_item_array) = new DiagsConditionItem[condition_item_count];
    }
    if (message_obj_size_needed > message_obj_size)
    {
      resize = TRUE;
      delete [] message_buffer_ptr;
      message_obj_size = message_obj_size_needed;
      message_buffer_ptr = new char[message_obj_size];
    }
  } // while (*retcode == 0 && resize)

  if (condition_item_count_needed > 0)
  {
    // The actual diags area resides in the context heap and is inaccessible
    // here in non-priv code. We must unpack the copy which has been
    // packed for us in the buffer pointed to by message_buffer_ptr.
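    // unpackObj() rebuilds a usable ComDiagsArea from that packed image;
    // the count returned below tells the caller how many entries of
    // (*condition_item_array) were actually filled in.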
diagsArea->unpackObj(message_obj_type, message_obj_version, TRUE, message_obj_size, message_buffer_ptr); } delete [] message_buffer_ptr; return (condition_item_count_needed); } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDiagnosticsCondInfo( /*IN*/ SQLDIAG_COND_INFO_ITEM * cond_info_items, /*IN*/ SQLDESC_ID * cond_num_descriptor, /*IN*/ SQLDESC_ID * output_descriptor) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); NAHeap heap("NonPriv CLI", NAMemory::DERIVED_FROM_SYS_HEAP, 32 * 1024); Lng32 msgInLocaleLen = 0; NABoolean msgInLocaleLenIsAvailable = FALSE; Lng32 condition_item_count; const Lng32 try_count = 16; // starting size; GetAutoSizeCondInfo may resize the array DiagsConditionItem *condition_item_array = new DiagsConditionItem[try_count]; NAWchar* p = NULL; SQLMXLoggingArea::ExperienceLevel emsEventEL = SQLMXLoggingArea::eAdvancedEL; ComDiagsArea *diagsArea = ComDiagsArea::allocate(&heap); if ((condition_item_count = GetAutoSizedCondInfo(&retcode, cond_info_items, cond_num_descriptor, output_descriptor, diagsArea, try_count, &condition_item_array, &emsEventEL )) > 0) { for (Lng32 i = 0; i < condition_item_count && retcode == 0; i++) { ComCondition & condition = (*diagsArea)[condition_item_array[i].condition_index]; // if (condition.getIso88591MappingCharSet() == CharInfo::UnknownCharSet) // { // // store the iso_mapping cqd setting to the ComCondition object // // for later use by its methods to do the charset conversion // condition.setIso88591MappingCharSet(isoMapCS); // } switch (condition_item_array[i].item_id) { case SQLDIAG_RET_SQLSTATE: /* (string ) returned SQLSTATE */ ComSQLSTATE(condition.getSQLCODE(), condition_item_array[i].var_ptr); break; case SQLDIAG_CLASS_ORIG: /* (string ) class origin, e.g. ISO 9075 */ copyResultString(condition_item_array[i].var_ptr, ComClassOrigin(condition.getSQLCODE()), condition_item_array[i].length); break; case SQLDIAG_SUBCLASS_ORIG: /* (string ) subclass origin, e.g. ISO 9075 */ copyResultString(condition_item_array[i].var_ptr, ComSubClassOrigin(condition.getSQLCODE()), condition_item_array[i].length); break; // R2 case SQLDIAG_MSG_TEXT: /* (string ) message text */ logAnMXEventForError(condition, emsEventEL); msgInLocaleLenIsAvailable = TRUE; p = (NAWchar *)(condition.getMessageText(TRUE, // NABoolean prefixAdded CharInfo::UTF8 ) ); // msg in UCS2 if ( p && condition_item_array[i].length > 0) { Int32 wMsgLen = condition.getMessageLength(); // msg length in UCS2 characters // assert (condition_item_array[i].charset != CharInfo::UnknownCharSet); // Do a conversion if the target charset is not UNICODE. // If the target charset is UNICODE, just do a str cpy. switch (condition_item_array[i].charset) { case CharInfo::UNICODE: { // condition_item_array[i].length gives the available // space (in chars) for the msg. 
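                // Target charset is UNICODE, so no conversion is needed:
                // copy at most that many UCS2 characters and NUL-terminate
                // only when the message leaves room for the terminator.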
Int32 wCharToConvert = MINOF(condition_item_array[i].length, wMsgLen); na_wcsncpy((NAWchar*)condition_item_array[i].var_ptr, (NAWchar*)p, wCharToConvert); msgInLocaleLen = wCharToConvert; if ( wMsgLen < condition_item_array[i].length ) ((wchar_t*)(condition_item_array[i].var_ptr))[msgInLocaleLen]= 0; } break; default: { // Convert the message from UCS2 to ISO_MAPPING charset // to UTF8 character set { Lng32 cnvErrStatus = 0; char * pFirstUntranslatedChar = NULL; cnvErrStatus = SQL_EXEC_UTF16ToLocale( cnv_UTF8, // IN Int32 conv_charset of output ((void *)p), // IN void * Input_Buffer_Addr wMsgLen * 2 /* bytes per UCS2 char */, // IN Int32 Input_Buffer_Octet_Length (void *)condition_item_array[i].var_ptr, // I/O void * Output_Buffer_Addr condition_item_array[i].length, // IN Int32 Output_Buffer_Octet_Length (void **)&pFirstUntranslatedChar, // OUT void * * First_Untranslated_Char_Addr &msgInLocaleLen, // OUT Int32 * Output_Data_Octet_Length 0, // IN Int32 conv_flags (Lng32)FALSE, // IN Int32 add_null_at_end_Flag (Lng32)TRUE, // IN Int32 allow_invalids (Lng32 *)NULL, // OUT Int32 * num_translated_char NULL // IN void * substitution_char_addr ); // use ? as the substitute char switch(cnvErrStatus) { case 0: // success // append NULL at the end if there is enough room if (condition_item_array[i].length > msgInLocaleLen) ((char *)(condition_item_array[i].var_ptr))[msgInLocaleLen] = '\0'; break; case -1: // CNV_ERR_INVALID_CHAR Character in input cannot be converted // impossible condition // we enable the "allow invalid characters" option which will // substitute every invalid character with the ? character. case -2: // CNV_ERR_BUFFER_OVERRUN No output buffer or not big enough // No problem, a truncated message is better than nothing // The truncated message does not include the NULL terminator // assert (msgInLocaleLen == condition_item_array[i].length); case -3: // CNV_ERR_NOINPUT No input buffer or input cnt <= 0 // assert (msgInLocaleLen == 0 && (wMsgLen == 0 || NAWstrlen(p) == 0)); case -4: // CNV_ERR_INVALID_CS Invalid Character Set specified case -5: // CNV_ERR_INVALID_VERS Invalid version specified default: // impossible conditions break; } // switch } if (msgInLocaleLen == 0 && wMsgLen > 0 && condition_item_array[i].length > 80) { // UTF8 char * pMsg = (char *)condition_item_array[i].var_ptr; // SJIS const char *initMsg = // ISO88591 "*** ERROR[3066] Unable to convert error message from UTF16 to "; // NULL //012345678901234567890123456789012345678901234567890123456789012345678901 // 1 3 4 5 6 7 8 pMsg[0] = '\0'; str_cat(pMsg, initMsg, pMsg); str_cat(pMsg, SQLCHARSETSTRING_UTF8, pMsg); msgInLocaleLen = str_len(pMsg); } // if (msgInLocaleLen == 0 && wMsgLen > 0 && ... > 80) } break; } // end of switch (condition_item_array[i].charset) if(msgInLocaleLen == 0) *(char *)condition_item_array[i].var_ptr = 0; } else { *(char *)condition_item_array[i].var_ptr = 0; } break; // R2 // the message length is the same regardless of the charset // it is true for R2 and the foreseen post R2. case SQLDIAG_MSG_LEN: /* (numeric) message length in characters */ try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_OutputValueIntoNumericHostvar(GetCliGlobals(), output_descriptor, condition_item_array[i].output_entry + 1, condition.getMessageLength()); } catch(...) 
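          // Same exception-to-retcode pattern as the top-level entry points,
          // here guarding the nested SQLCLI_OutputValueIntoNumericHostvar() call.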
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); break; case SQLDIAG_MSG_OCTET_LEN: /* (numeric) message length in bytes */ try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_OutputValueIntoNumericHostvar(GetCliGlobals(), output_descriptor, condition_item_array[i].output_entry + 1, msgInLocaleLen); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); if(msgInLocaleLenIsAvailable == FALSE) { retcode = -CLI_INTERNAL_ERROR; } retcode = RecordError(NULL, retcode); break; } // switch (condition_item_array[i].item_id) } // for (Lng32 i = 0; i < condition_item_count && retcode == 0; i++) } // if (condition_item_count > 0) delete [] condition_item_array; diagsArea->clear(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDIAGNOSTICSCONDINFO( /*IN*/ SQLDIAG_COND_INFO_ITEM *cond_info_items, /*IN*/ SQLDESC_ID * cond_num_descriptor, /*IN*/ SQLDESC_ID * output_descriptor){ return SQL_EXEC_GetDiagnosticsCondInfo(cond_info_items, cond_num_descriptor, output_descriptor); }; #endif #pragma nowarn(770) // warning elimination #ifndef CLI_PRIV_SRL SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDiagnosticsCondInfo2( /*IN* (SQLDIAG_COND_INFO_ITEM_ID) */ Lng32 what_to_get, /*IN*/ Lng32 conditionNum, /*OUT OPTIONAL*/ Lng32 * numeric_value, /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL */ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); switch (what_to_get) { case SQLDIAG_RET_SQLSTATE: /* (string ) returned SQLSTATE */ case SQLDIAG_CLASS_ORIG: /* (string ) class origin, e.g. ISO 9075 */ case SQLDIAG_SUBCLASS_ORIG: /* (string ) subclass origin, e.g. ISO 9075*/ case SQLDIAG_MSG_TEXT: /* (string ) message text */ case SQLDIAG_MSG_LEN: /* (numeric) message length in characters */ case SQLDIAG_MSG_OCTET_LEN: /* (numeric) message length in bytes */ { Lng32 rc; // for these cases, call GetDiagnosticsCondInfo(). 
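        // Build a one-entry condition-number descriptor and a one-entry
        // output descriptor on the fly, route the request through
        // SQL_EXEC_GetDiagnosticsCondInfo(), then deallocate both
        // descriptors again below.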
SQLDIAG_COND_INFO_ITEM cond_info_item; cond_info_item.item_id = what_to_get; cond_info_item.cond_number_desc_entry = 1; SQLMODULE_ID module; module.module_name = 0; module.module_name_len = 0; SQLDESC_ID cond_num_descriptor = { 1, desc_name, &module, "dummy_input_name", 0, 0, 10 }; Lng32 i = conditionNum; rc = SQL_EXEC_AllocDesc(&cond_num_descriptor, 1); rc = SQL_EXEC_SetDescItem(&cond_num_descriptor, 1, SQLDESC_TYPE_FS, REC_BIN32_SIGNED, 0); rc = SQL_EXEC_SetDescPointers(&cond_num_descriptor,1,1,&(i),0); SQLDESC_ID output_descriptor = { 1, desc_name, &module, "dummy_output_name", 0, 0, 11 }; rc = SQL_EXEC_AllocDesc(&output_descriptor, 1); if ((what_to_get == SQLDIAG_MSG_LEN) || (what_to_get == SQLDIAG_MSG_OCTET_LEN)) { rc = SQL_EXEC_SetDescItem(&output_descriptor, 1, SQLDESC_TYPE_FS, REC_BIN32_SIGNED, 0); rc = SQL_EXEC_SetDescPointers(&output_descriptor,1,1,numeric_value,0); } else { rc = SQL_EXEC_SetDescItem(&output_descriptor, 1, SQLDESC_TYPE_FS, REC_MIN_V_N_CHAR_H, 0); rc = SQL_EXEC_SetDescItem(&output_descriptor, 1, SQLDESC_LENGTH, max_string_len, 0); rc = SQL_EXEC_SetDescPointers(&output_descriptor,1,1,string_value,0); } rc = SQL_EXEC_GetDiagnosticsCondInfo( &cond_info_item, &cond_num_descriptor, &output_descriptor); if ((what_to_get == SQLDIAG_RET_SQLSTATE) || (what_to_get == SQLDIAG_CLASS_ORIG) || (what_to_get == SQLDIAG_SUBCLASS_ORIG) || (what_to_get == SQLDIAG_MSG_TEXT)) { if (len_of_item) *len_of_item = str_len(string_value); } rc = SQL_EXEC_DeallocDesc(&cond_num_descriptor); rc = SQL_EXEC_DeallocDesc(&output_descriptor); } break; default: { retcode = SQLCLI_GetDiagnosticsCondInfo2( GetCliGlobals(), what_to_get, conditionNum, numeric_value, string_value, max_string_len, len_of_item); } } } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } #pragma warn(770) // warning elimination SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDIAGNOSTICSCONDINFO2( /*IN* (SQLDIAG_COND_INFO_ITEM_ID) */ Lng32 what_to_get, /*IN*/ Lng32 conditionNum, /*OUT OPTIONAL*/ Lng32 * numeric_value, /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL */ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item) { return SQL_EXEC_GetDiagnosticsCondInfo2(what_to_get, conditionNum, numeric_value, string_value, max_string_len, len_of_item); } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDiagnosticsCondInfo3( /*IN*/ Lng32 no_of_condition_items, /*IN*/ SQLDIAG_COND_INFO_ITEM_VALUE diag_cond_info_item_values[]) { Lng32 retcode = 0; IpcMessageObjSize message_obj_size_needed = 0; IpcMessageObjSize message_obj_size; IpcMessageBufferPtr message_buffer_ptr; IpcMessageObjType message_obj_type; IpcMessageObjVersion message_obj_version; ComDiagsArea *diagsArea; SQLDIAG_COND_INFO_ITEM_VALUE diagItemValues; Lng32 max_string_len; Lng32 what_to_get; Lng32 conditionNum; Lng32 * numeric_value; Lng32 * len_of_item; char * string_value; NAWchar* p = NULL; NABoolean msgInLocaleLenIsAvailable = FALSE; Lng32 msgInLocaleLen = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDiagnosticsCondInfo3(GetCliGlobals(), no_of_condition_items, diag_cond_info_item_values, &message_obj_size_needed); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); if (message_obj_size_needed > 0) { NAHeap heap("NonPriv CLI", NAMemory::DERIVED_FROM_SYS_HEAP, message_obj_size_needed*2); // We multiply by two to account for packing diagsArea = ComDiagsArea::allocate(&heap); message_buffer_ptr = new char[message_obj_size_needed]; message_obj_size = message_obj_size_needed; SQLMXLoggingArea::ExperienceLevel emsEventEL = SQLMXLoggingArea::eAdvancedEL; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDiagnosticsArea(GetCliGlobals(), message_buffer_ptr, message_obj_size, &message_obj_type, &message_obj_version); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { delete [] message_buffer_ptr; diagsArea->clear(); diagsArea->deAllocate(); threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); //LCOV_EXCL_START if (retcode < 0) { delete [] message_buffer_ptr; diagsArea->clear(); diagsArea->deAllocate(); return retcode; } //LCOV_EXCL_STOP // The actual diags area resides in the context heap and is inaccessible // here in non-priv code. We must unpack the copy which has been // packed for us in the buffer pointed to by message_buffer_ptr. diagsArea->unpackObj(message_obj_type, message_obj_version, TRUE, message_obj_size_needed, message_buffer_ptr); for(Int32 i=0; i<no_of_condition_items && retcode == 0; i++) { diagItemValues = diag_cond_info_item_values[i]; //LCOV_EXCL_START if (diagItemValues.num_val_or_len == NULL) { delete [] message_buffer_ptr; diagsArea->clear(); diagsArea->deAllocate(); return -CLI_INVALID_ATTR_VALUE; } //LCOV_EXCL_STOP string_value = diagItemValues.string_val; max_string_len = string_value ? *(diagItemValues.num_val_or_len) : 0; conditionNum = diagItemValues.item_id_and_cond_number.cond_number_desc_entry; ComCondition & condition = diagsArea->operator[](conditionNum); what_to_get = diagItemValues.item_id_and_cond_number.item_id; numeric_value = diagItemValues.num_val_or_len; len_of_item = diagItemValues.num_val_or_len; // if (condition.getIso88591MappingCharSet() == CharInfo::UnknownCharSet) // { // // store the iso_mapping cqd setting to the ComCondition object // // for later use by its methods to do the charset conversion // condition.setIso88591MappingCharSet(isoMapCS); // } switch (what_to_get) { case SQLDIAG_RET_SQLSTATE: /* (string ) returned SQLSTATE */ { if(condition.getCustomSQLState() == NULL) { //LCOV_EXCL_START if (max_string_len < 6) { delete [] message_buffer_ptr; diagsArea->clear(); diagsArea->deAllocate(); return -CLI_INVALID_ATTR_VALUE; } //LCOV_EXCL_STOP ComSQLSTATE(condition.getSQLCODE(), string_value); if (len_of_item) *len_of_item = 6; } else { copyResultString(string_value, condition.getCustomSQLState(), max_string_len, len_of_item); } } break; /* GetDiagnosticsCondInfo3 currently returns sqlcode or a part of it for these condition items. They do not appear to be fully supported yet. */ case SQLDIAG_CLASS_ORIG: /* (string ) class origin, e.g. 
ISO 9075 */
        copyResultString(string_value, condition.getClassOrigin(),
                         max_string_len, len_of_item);
        break;
      case SQLDIAG_SUBCLASS_ORIG: /* (string ) subclass origin, e.g. ISO 9075*/
        copyResultString(string_value, condition.getSubClassOrigin(),
                         max_string_len, len_of_item);
        break;
      // Currently this method assumes that the target is ISO_MAPPING CQD CharSet
      // - For SeaQuest, the target character set is UTF8.
      // This CLI call is currently used only by ODBC/JDBC
      // To make this method support other charsets, we will need to change the signature
      // of this method. This assumption affects the next three cases (SQLDIAG_MSG_TEXT,
      // SQLDIAG_MSG_LEN, and SQLDIAG_MSG_OCTET_LEN)
      case SQLDIAG_MSG_TEXT: /* (string ) message text */
      {
        logAnMXEventForError(condition, emsEventEL);
        msgInLocaleLenIsAvailable = TRUE;
        p = (NAWchar *)(condition.getMessageText(TRUE, // NABoolean prefixAdded
                                                 CharInfo::UTF8 ) ); // msg in UCS2
        if ( p && max_string_len > 0)
        {
          Int32 wMsgLen = condition.getMessageLength(); // msg length in UCS2 characters
          {
            Lng32 cnvErrStatus = 0;
            char * pFirstUntranslatedChar = NULL;
            cnvErrStatus = SQL_EXEC_UTF16ToLocale(
              cnv_UTF8,                              // IN Int32 conv_charset of output
              ((void *)p),                           // IN void * Input_Buffer_Addr
              wMsgLen * 2 /* bytes per UCS2 char */, // IN Int32 Input_Buffer_Octet_Length
              ((void *)string_value),                // I/O void * Output_Buffer_Addr
              max_string_len,                        // IN Int32 Output_Buffer_Octet_Length
              (void **)&pFirstUntranslatedChar,      // OUT void * * First_Untranslated_Char_Addr
              &msgInLocaleLen,                       // OUT Int32 * Output_Data_Octet_Length
              0,                                     // IN Int32 conv_flags
              (Lng32)FALSE,                          // IN Int32 add_null_at_end_Flag
              (Lng32)TRUE,                           // IN Int32 allow_invalids
              (Lng32 *)NULL,                         // OUT Int32 * num_translated_char
              NULL                                   // IN void * substitution_char_addr
              );                                     // use ? as the substitute char
            switch(cnvErrStatus)
            {
              case 0: // success
                // append NULL at the end if there is enough room
                if (max_string_len > msgInLocaleLen)
                  string_value[msgInLocaleLen] = '\0';
                break;
              case -1: // CNV_ERR_INVALID_CHAR   Character in input cannot be converted
                // impossible condition
                // we enable the "allow invalid characters" option which will
                // substitute every invalid character with the ? character.
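              // Cases -1 through -5 all fall through to the shared break
              // under default: per the notes on each case, a truncated or
              // empty message is tolerated here.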
case -2: // CNV_ERR_BUFFER_OVERRUN No output buffer or not big enough // No problem, a truncated message is better than nothing // The truncated message does not include the NULL terminator // assert (msgInLocaleLen == max_string_len); case -3: // CNV_ERR_NOINPUT No input buffer or input cnt <= 0 // assert (msgInLocaleLen == 0 && (wMsgLen == 0 || NAWstrlen(p) == 0)); case -4: // CNV_ERR_INVALID_CS Invalid Character Set specified case -5: // CNV_ERR_INVALID_VERS Invalid version specified default: // impossible conditions break; } // switch } //LCOV_EXCL_START if (msgInLocaleLen == 0 && wMsgLen > 0 && max_string_len > 80) { // UTF8 char * pMsg = string_value; // SJIS const char *initMsg = // ISO88591 "*** ERROR[3066] Unable to convert error message from UTF16 to "; // NULL //012345678901234567890123456789012345678901234567890123456789012345678901 // 1 3 4 5 6 7 88 pMsg[0] = '\0'; str_cat(pMsg, initMsg, pMsg); str_cat(pMsg, SQLCHARSETSTRING_UTF8, pMsg); msgInLocaleLen = str_len(pMsg); } // if (msgInLocaleLen == 0 && wMsgLen > 0 && max_string_len > 80) //LCOV_EXCL_STOP } if(msgInLocaleLen == 0) *string_value = 0; if(len_of_item) *len_of_item = msgInLocaleLen; } break; case SQLDIAG_MSG_LEN: /* (numeric) message length in characters */ *numeric_value = condition.getMessageLength(); break; case SQLDIAG_MSG_OCTET_LEN: /* (numeric) message length in bytes */ *numeric_value = msgInLocaleLen; if(msgInLocaleLenIsAvailable == FALSE) { retcode = -CLI_INTERNAL_ERROR; } break; } } delete [] message_buffer_ptr; diagsArea->clear(); diagsArea->deAllocate(); } return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETDIAGNOSTICSCONDINFO3( /*IN*/ Lng32 no_of_condition_items, /*IN*/ SQLDIAG_COND_INFO_ITEM_VALUE diag_cond_info_item_values[]) { return SQL_EXEC_GetDiagnosticsCondInfo3(no_of_condition_items, diag_cond_info_item_values); } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetMainSQLSTATE( /*IN*/ SQLSTMT_ID * stmtId, /*IN*/ Lng32 sqlcode, /*OUT*/ char * sqlstate /* assumed to be char[6] */) { Lng32 retcode = 0; CLI_NONPRIV_PROLOGUE(retcode); char localSQLSTATE[6] = "\0"; // Since cliglobals and executor context are not accessible from // here, this part needs to be rewritten similar to // SQL_EXEC_GetDiagnosticsCondInfo. TBD. // For now, just return the regular sqlstate. ComSQLSTATE(sqlcode,localSQLSTATE); str_cpy_all(sqlstate, localSQLSTATE, 5); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETMAINSQLSTATE( /*IN*/ SQLSTMT_ID * statement_id, /*IN*/ Lng32 sqlcode, /*OUT*/ char * sqlstate /* assumed to be char[6] */) { return SQL_EXEC_GetMainSQLSTATE(statement_id, sqlcode, sqlstate); } //LCOV_EXCL_STOP #endif // CLI_PRIV_SRL #ifndef CLI_PRIV_SRL SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetCSQLSTATE(/*OUT*/ char * theSQLSTATE /* assumed char[6] */, /*IN*/ Lng32 theSQLCODE) { Lng32 retcode = 0; CLI_NONPRIV_PROLOGUE(retcode); ComSQLSTATE(theSQLCODE,theSQLSTATE); return retcode; } #else SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetCSQLSTATE(/*OUT*/ char * theSQLSTATE /* assumed char[6] */, /*IN*/ Lng32 theSQLCODE) { Lng32 retcode = 0; // when this method is called from a priv caller(executor), then // we cannot look at the msgs file(it could be looked at only thru // a non-priv interface since it uses c-runtime and globals). // Just return the string representation of the input state. 
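  // Hand-rolled signed-to-string conversion: format the magnitude with
  // str_itoa(), then, for negative SQLCODEs, shift the digits right one
  // position and prepend the '-' sign.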
ULng32 sqlCodeUnsigned; if (theSQLCODE < 0) sqlCodeUnsigned = -theSQLCODE; else sqlCodeUnsigned = theSQLCODE; str_itoa(sqlCodeUnsigned, theSQLSTATE); if (theSQLCODE < 0) { for (Int32 i = 5; i > 0; i--) theSQLSTATE[i] = theSQLSTATE[i-1]; theSQLSTATE[0] = '-'; } return retcode; } #endif //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETCSQLSTATE( /*OUT*/ char * sqlstate /* assumed to be char[6] */, /*IN*/ Lng32 sqlcode){ return SQL_EXEC_GetCSQLSTATE(sqlstate, sqlcode); }; #ifndef CLI_PRIV_SRL SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetCobolSQLSTATE(/*OUT*/ char * theSQLSTATE /*assumed char[5]*/, /*IN*/ Lng32 theSQLCODE) { Lng32 retcode = 0; char localSQLSTATE[6]; ComSQLSTATE(theSQLCODE,localSQLSTATE); str_cpy_all(theSQLSTATE, localSQLSTATE, 5); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETCOBOLSQLSTATE( /*OUT*/ char * sqlstate /* assumed to be char[5] */, /*IN*/ Lng32 sqlcode){ return SQL_EXEC_GetCobolSQLSTATE(sqlstate, sqlcode); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetSQLSTATE(/*OUT*/ char * SQLSTATE /* assumed to be char[6] */) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; Lng32 sqlcode; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetSQLCODE(GetCliGlobals(), &sqlcode); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); ComSQLSTATE(sqlcode, SQLSTATE); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETSQLSTATE( /*OUT*/ char * sqlstate /* assumed to be char[6] */){ return SQL_EXEC_GetSQLSTATE(sqlstate); }; //LCOV_EXCL_STOP #endif SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetSessionAttr( /*IN (SESSIONATTR_TYPE )*/ Lng32 attrName, /*OUT OPTIONAL*/ Lng32 * numeric_value, /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetSessionAttr(GetCliGlobals(), attrName, numeric_value, string_value, max_string_len, len_of_item); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } Lng32 SQL_EXEC_GetAuthID( const char * authName, Lng32 & authID) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetAuthID(GetCliGlobals(), authName, authID); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetAuthName_Internal( Lng32 auth_id, char *string_value, Lng32 max_string_len, Lng32 &len_of_item) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetAuthName(GetCliGlobals(), auth_id, string_value, max_string_len, len_of_item); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDatabaseUserName_Internal ( /*IN*/ Lng32 user_id, /*OUT*/ char *string_value, /*IN*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 *len_of_item) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDatabaseUserName(GetCliGlobals(), user_id, string_value, max_string_len, len_of_item); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDatabaseUserID_Internal (/*IN*/ char *string_value, /*OUT*/ Lng32 *numeric_value) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDatabaseUserID(GetCliGlobals(), string_value, numeric_value); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Int32 SQL_EXEC_GetAuthState_Internal( /*OUT*/ bool & authenticationEnabled, /*OUT*/ bool & authorizationEnabled, /*OUT*/ bool & authorizationReady, /*OUT*/ bool & auditingEnabled) { Int32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetAuthState(GetCliGlobals(), authenticationEnabled, authorizationEnabled, authorizationReady, auditingEnabled); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetSessionAttr_Internal( /*IN (SESSIONATTR_TYPE)*/ Lng32 attrName, /*IN OPTIONAL*/ Lng32 numeric_value, /*IN OPTIONAL*/ char *string_value) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetSessionAttr(GetCliGlobals(), attrName, numeric_value, string_value); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetUniqueQueryIdAttrs( /*IN*/ char * uniqueQueryId, /*IN*/ Lng32 uniqueQueryIdLen, /*IN*/ Lng32 no_of_attrs, /*INOUT*/ UNIQUEQUERYID_ATTR unique_queryid_attrs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetUniqueQueryIdAttrs(GetCliGlobals(), uniqueQueryId, uniqueQueryIdLen, no_of_attrs, unique_queryid_attrs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetStmtAttr(/*IN*/ SQLSTMT_ID * statement_id, /*IN (SQLATTR_TYPE )*/ Lng32 attrName, /*OUT OPTIONAL*/ Lng32 * numeric_value, /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetStmtAttr(GetCliGlobals(), statement_id, attrName, numeric_value, string_value, max_string_len, len_of_item); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETSTMTATTR(/*IN*/ SQLSTMT_ID * statement_id, /*IN* (SQLATTR_TYPE) */ Lng32 attrName, /*OUT OPTIONAL*/ Lng32 * numeric_value, /*OUT OPTIONAL*/ char * string_value, /*IN OPTIONAL*/ Lng32 max_string_len, /*OUT OPTIONAL*/ Lng32 * len_of_item) { return SQL_EXEC_GetStmtAttr(statement_id, attrName, numeric_value, string_value, max_string_len, len_of_item); } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetStmtAttrs(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ Lng32 number_of_attrs, /*INOUT*/ SQLSTMT_ATTR attrs[], /*OUT OPTIONAL*/ Lng32 * num_returned) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetStmtAttrs(GetCliGlobals(), statement_id, number_of_attrs, attrs, num_returned); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETSTMTATTRS(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ Lng32 number_of_attrs, /*INOUT*/ SQLSTMT_ATTR attrs[], /*OUT OPTIONAL*/ Lng32 * num_returned) { return SQL_EXEC_GetStmtAttrs(statement_id, number_of_attrs, attrs, num_returned); } //LCOV_EXCL_STOP #ifdef __cplusplus extern "C" { #endif SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetStatistics(/*IN OPTIONAL*/ SQLSTMT_ID * statement_id, /*INOUT*/ SQL_QUERY_STATISTICS *query_statistics) { return -CLI_INTERNAL_ERROR; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETSTATISTICS( /*IN OPTIONAL*/ SQLSTMT_ID * statement_id) { return SQL_EXEC_GetStatistics(statement_id, NULL); }; #ifdef __cplusplus } #endif /*__cplusplus*/ SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GETMPCATALOG( /*IN*/ char * ANSIObjName, /*INOUT*/ char * MPObjName, /*IN*/ Lng32 MPObjNameMaxLen, /*INOUT*/ Lng32 * MPObjNameLen, /*OUT*/ char * MPCatalogName, /*IN*/ Lng32 MPCatalogNameMaxLen, /*OUT*/ Lng32 * MPCatalogNameLen) { Lng32 retcode = 0; return retcode; }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetPfsSize(/*OUT*/Int32 *pfsSize, /*OUT*/Int32 *pfsCurUse, /*OUT*/Int32 *pfsMaxUse) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetPfsSize(GetCliGlobals(), pfsSize, pfsCurUse, pfsMaxUse); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } Lng32 SQL_EXEC_CleanUpPfsResources() { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_CleanUpPfsResources(GetCliGlobals()); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_Prepare(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_Prepare(GetCliGlobals(), statement_id, sql_source); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_Prepare2(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source, /*INOUT*/ char * gencode_ptr, /*IN*/ Lng32 gencode_len, /*INOUT*/ Lng32 * ret_gencode_len, /*INOUT*/ SQL_QUERY_COST_INFO *query_cost_info, /*INOUT*/ SQL_QUERY_COMPILER_STATS_INFO *comp_stats_info, /*INOUT*/ char * uniqueStmtId, /*INOUT*/ Lng32 * uniqueStmtIdLen, /*IN*/ ULng32 flags) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_Prepare2(GetCliGlobals(), statement_id, sql_source, gencode_ptr, gencode_len, ret_gencode_len, query_cost_info, comp_stats_info, uniqueStmtId, uniqueStmtIdLen, flags); // -2008 is an internal error - don't attempt AQR in case of internal errors if ((retcode < 0) && (retcode != -2008)) { retcode = SQLCLI_ProcessRetryQuery(GetCliGlobals(), statement_id, retcode, 1, 0, 0, 0); } } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_PREPARE( /*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLDESC_ID * sql_source){ return SQL_EXEC_Prepare(statement_id, sql_source); }; SQLCLI_LIB_FUNC Int32 SQL_EXEC_GetExplainData( /*IN*/ SQLSTMT_ID * statement_id, /*INOUT*/ char * explain_ptr, /*IN*/ Int32 explain_len, /*INOUT*/ Int32 * ret_explain_len) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetExplainData(GetCliGlobals(), statement_id, explain_ptr, explain_len, ret_explain_len); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Int32 SQL_EXEC_StoreExplainData( /*IN*/ Int64 * exec_start_utc_ts, /*IN*/ char * query_id, /*INOUT*/ char * explain_ptr, /*IN*/ Int32 explain_len) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_StoreExplainData(GetCliGlobals(), exec_start_utc_ts, query_id, explain_ptr, explain_len); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ResDescName(/*INOUT*/ SQLDESC_ID * statement_id, /*IN OPTIONAL*/ SQLSTMT_ID * from_statement, /* (SQLWHAT_DESC) *IN OPTIONAL*/ Lng32 what_desc) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ResDescName(GetCliGlobals(), statement_id, from_statement, what_desc); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_RESDESCNAME( /*INOUT*/ SQLDESC_ID * statement_id, /*IN OPTIONAL*/ SQLSTMT_ID * from_statement, /*IN OPTIONAL (SQLWHAT_DESC) */ Lng32 what_desc){ return SQL_EXEC_ResDescName(statement_id, from_statement, what_desc); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ResStmtName(/*INOUT*/ SQLSTMT_ID * statement_id) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ResStmtName(GetCliGlobals(), statement_id); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_RESSTMTNAME( /*INOUT*/ SQLSTMT_ID * statement_id){ return SQL_EXEC_ResStmtName(statement_id); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetCursorName(/*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLSTMT_ID * cursor_name) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetCursorName(GetCliGlobals(), statement_id, cursor_name); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); retcode = RecordError(statement_id, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETCURSORNAME( /*IN*/ SQLSTMT_ID * statement_id, /*IN*/ SQLSTMT_ID * cursor_name){ return SQL_EXEC_SetCursorName(statement_id, cursor_name); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetStmtAttr(/*IN*/ SQLSTMT_ID * statement_id, /*IN* (SQLATTR_TYPE) */ Lng32 attrName, /*IN OPTIONAL*/ Lng32 numeric_value, /*IN OPTIONAL*/ char * string_value) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetStmtAttr(GetCliGlobals(), statement_id, attrName, numeric_value, string_value); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); if ( (retcode != 0) && (attrName != SQL_ATTR_COPY_STMT_ID_TO_DIAGS) ) RecordError( statement_id, retcode ); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETSTMTATTR(/*IN*/ SQLSTMT_ID * statement_id, /*IN* (SQLATTR_TYPE) */ Lng32 attrName, /*IN OPTIONAL*/ Lng32 numeric_value, /*IN OPTIONAL*/ char * string_value) { return SQL_EXEC_SetStmtAttr( statement_id, attrName, numeric_value, string_value); } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetDescEntryCount(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ID * input_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetDescEntryCount(GetCliGlobals(), sql_descriptor, input_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETDESCENTRYCOUNT( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ID * input_descriptor){ return SQL_EXEC_SetDescEntryCount(sql_descriptor, input_descriptor); }; SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetDescEntryCountBasic(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 num_entries) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetDescEntryCountInt(GetCliGlobals(), sql_descriptor, num_entries); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetDescItem(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 entry, /* (SQLDESC_ITEM_ID) *IN*/ Lng32 what_to_set, /*IN OPTIONAL*/ Long numeric_value, /*IN OPTIONAL*/ char * string_value) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetDescItem(GetCliGlobals(), sql_descriptor, entry, what_to_set, numeric_value, string_value); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETDESCITEM( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 entry, /*IN* (SQLDESC_ITEM_ID) */ Lng32 what_to_set, /*IN OPTIONAL*/ Long numeric_value, /*IN OPTIONAL*/ char * string_value){ return SQL_EXEC_SetDescItem(sql_descriptor, entry, what_to_set, numeric_value, string_value); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetDescItems(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ITEM desc_items[], /*IN*/ SQLDESC_ID * value_num_descriptor, /*IN*/ SQLDESC_ID * input_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetDescItems(GetCliGlobals(), sql_descriptor, desc_items, value_num_descriptor, input_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETDESCITEMS( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ SQLDESC_ITEM desc_items[], /*IN*/ SQLDESC_ID * value_num_descriptor, /*IN*/ SQLDESC_ID * input_descriptor) { return SQL_EXEC_SetDescItems(sql_descriptor, desc_items, value_num_descriptor, input_descriptor); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetDescItems2( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 no_of_desc_items, /*IN*/ SQLDESC_ITEM desc_items[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetDescItems2(GetCliGlobals(), sql_descriptor, no_of_desc_items, desc_items); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETDESCITEMS2( /*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 no_of_desc_items, /*IN*/ SQLDESC_ITEM desc_items[]) { return SQL_EXEC_SetDescItems2(sql_descriptor, no_of_desc_items, desc_items); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetDescPointers(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 starting_entry, /*IN*/ Lng32 num_ptr_pairs, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_ptr_pairs); retcode = SQLCLI_SetDescPointers(GetCliGlobals(), sql_descriptor, starting_entry, num_ptr_pairs, ap, 0); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETDESCPOINTERS(/*IN*/ SQLDESC_ID * sql_descriptor, /*IN*/ Lng32 starting_entry, /*IN*/ Lng32 num_ptr_pairs, /*IN*/ SQLCLI_PTR_PAIRS ptr_pairs[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetDescPointers(GetCliGlobals(), sql_descriptor, starting_entry, num_ptr_pairs, 0, ptr_pairs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetRowsetDescPointers(SQLDESC_ID * sql_descriptor, Lng32 rowset_size, Lng32 *rowset_status_ptr, Lng32 starting_entry, Lng32 num_quadruple_fields, ...) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); va_list ap; va_start(ap, num_quadruple_fields); retcode = SQLCLI_SetRowsetDescPointers(GetCliGlobals(), sql_descriptor, rowset_size, rowset_status_ptr, starting_entry, num_quadruple_fields, ap, 0); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETROWSETDESCPOINTERS(SQLDESC_ID * sql_descriptor, Lng32 rowset_size, Lng32 *rowset_status_ptr, Lng32 starting_entry, Lng32 num_quadruple_fields, SQLCLI_QUAD_FIELDS quad_fields[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetRowsetDescPointers(GetCliGlobals(), sql_descriptor, rowset_size, rowset_status_ptr, starting_entry, num_quadruple_fields, 0, quad_fields); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SwitchContext(/*IN*/ SQLCTX_HANDLE ctxt_handle, /*OUT OPTIONAL*/ SQLCTX_HANDLE * prev_ctxt_handle) { Lng32 retcode; CLI_NONPRIV_PROLOGUE(retcode); try { retcode = SQLCLI_SwitchContext(GetCliGlobals(), ctxt_handle, prev_ctxt_handle); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { throw; } #endif } RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SWITCHCONTEXT( /*IN*/ SQLCTX_HANDLE context_handle, /*OUT OPTIONAL*/ SQLCTX_HANDLE * prev_context_handle){ return SQL_EXEC_SwitchContext(context_handle, prev_context_handle); }; //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_Xact(/*IN* (SQLTRANS_COMMAND) */ Lng32 command, /*OUT OPTIONAL*/ SQLDESC_ID * transid_descriptor) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_Xact(GetCliGlobals(), command, transid_descriptor); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_XACT( /*IN* (SQLTRANS_COMMAND) */ Lng32 command, /*OUT OPTIONAL*/ SQLDESC_ID * transid_descriptor){ return SQL_EXEC_Xact(command, transid_descriptor); }; //LCOV_EXCL_STOP Lng32 SQL_EXEC_SetAuthID( const char * externalUsername, const char * databaseUsername, const char * authToken, Int32 authTokenLen, Int32 effectiveUserID, Int32 sessionUserID) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetAuthID(GetCliGlobals(), externalUsername, databaseUsername, authToken, authTokenLen, effectiveUserID, sessionUserID); } catch(...) 
{
  retcode = -CLI_INTERNAL_ERROR;
#if defined(_THROW_EXCEPTIONS)
  if (cliWillThrow())
  {
    threadContext->decrNumOfCliCalls();
    tmpSemaphore->release();
    throw;
  }
#endif
}
threadContext->decrNumOfCliCalls();
tmpSemaphore->release();
RecordError(NULL, retcode);
return retcode;
}

/* temporary functions -- for use by sqlcat simulator only */
SQLCLI_LIB_FUNC Lng32 SQL_EXEC_AllocDesc(/*INOUT*/ SQLDESC_ID * desc_id,
                                         /*IN OPTIONAL*/ Lng32 max_entries)
{
  Lng32 retcode;
  CLISemaphore *tmpSemaphore;
  ContextCli   *threadContext;

  CLI_NONPRIV_PROLOGUE(retcode);

  try
  {
    tmpSemaphore = getCliSemaphore(threadContext);
    tmpSemaphore->get();
    threadContext->incrNumOfCliCalls();
    retcode = SQLCLI_AllocDescInt(GetCliGlobals(), desc_id, max_entries);
  }
  catch(...)
  {
    retcode = -CLI_INTERNAL_ERROR;
#if defined(_THROW_EXCEPTIONS)
    if (cliWillThrow())
    {
      threadContext->decrNumOfCliCalls();
      tmpSemaphore->release();
      throw;
    }
#endif
  }
  threadContext->decrNumOfCliCalls();
  tmpSemaphore->release();
  return retcode;
}

SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetDescEntryCount(/*IN*/ SQLDESC_ID * sql_descriptor,
                                                 /*OUT*/ Lng32 * num_entries)
{
  Lng32 retcode;
  CLISemaphore *tmpSemaphore;
  ContextCli   *threadContext;

  CLI_NONPRIV_PROLOGUE(retcode);

  try
  {
    tmpSemaphore = getCliSemaphore(threadContext);
    tmpSemaphore->get();
    threadContext->incrNumOfCliCalls();
    retcode = SQLCLI_GetDescEntryCountInt(GetCliGlobals(),
                                          sql_descriptor,
                                          num_entries);
  }
  catch(...)
  {
    retcode = -CLI_INTERNAL_ERROR;
#if defined(_THROW_EXCEPTIONS)
    if (cliWillThrow())
    {
      threadContext->decrNumOfCliCalls();
      tmpSemaphore->release();
      throw;
    }
#endif
  }
  threadContext->decrNumOfCliCalls();
  tmpSemaphore->release();
  return retcode;
}

SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetDescEntryCount(/*IN*/ SQLDESC_ID * sql_descriptor,
                                                 /*IN*/ Lng32 num_entries)
{
  Lng32 retcode;
  CLISemaphore *tmpSemaphore;
  ContextCli   *threadContext;

  CLI_NONPRIV_PROLOGUE(retcode);

  try
  {
    tmpSemaphore = getCliSemaphore(threadContext);
    tmpSemaphore->get();
    threadContext->incrNumOfCliCalls();
    retcode = SQLCLI_SetDescEntryCountInt(GetCliGlobals(),
                                          sql_descriptor,
                                          num_entries);
  }
  catch(...)
  {
    retcode = -CLI_INTERNAL_ERROR;
#if defined(_THROW_EXCEPTIONS)
    if (cliWillThrow())
    {
      threadContext->decrNumOfCliCalls();
      tmpSemaphore->release();
      throw;
    }
#endif
  }
  threadContext->decrNumOfCliCalls();
  tmpSemaphore->release();
  return retcode;
}

// For internal use only -- do not document!
// This method merges the CLI diags area into the caller's diags area
SQLCLI_LIB_FUNC Lng32 SQL_EXEC_MergeDiagnostics_Internal (/*INOUT*/ ComDiagsArea & newDiags)
{
  Lng32 retcode;
  CLISemaphore *tmpSemaphore;
  ContextCli   *threadContext;

  CLI_NONPRIV_PROLOGUE(retcode);

  try
  {
    tmpSemaphore = getCliSemaphore(threadContext);
    tmpSemaphore->get();
    threadContext->incrNumOfCliCalls();
    retcode = SQLCLI_MergeDiagnostics(GetCliGlobals(), newDiags);
  }
  catch(...)
  {
    retcode = -CLI_INTERNAL_ERROR;
#if defined(_THROW_EXCEPTIONS)
    if (cliWillThrow())
    {
      threadContext->decrNumOfCliCalls();
      tmpSemaphore->release();
      throw;
    }
#endif
  }
  threadContext->decrNumOfCliCalls();
  tmpSemaphore->release();
  return retcode;
}

//LCOV_EXCL_START
// For internal use only -- do not document
// A new CLI call to allow catman to specify to the CLI which version of compiler
// to use to prepare a query. The caller can specify either a node name or a version but
// not both. If both are specified an error will be returned.
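// Illustrative call shape (hypothetical values, sketched here only for
// clarity; not taken from an actual caller):
//   short ver = 1;  // some compiler version
//   Lng32 rc = SQL_EXEC_SetCompilerVersion_Internal(ver, NULL);
//   // nodeName is NULL because version and node name are mutually
//   // exclusive, as described above.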
SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetCompilerVersion_Internal( short mxcmpVersionToUse, char *nodeName) { Lng32 retcode; CLI_NONPRIV_PROLOGUE(retcode); retcode = SQLCLI_SetCompilerVersion_Internal(GetCliGlobals(),mxcmpVersionToUse, nodeName); return retcode; } // A new CLI call to return the compiler vesion in the current context. // The caller can specify a node name to get the version of a remote node. SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetCompilerVersion_Internal( short &mxcmpVersionToUse, char *nodeName) { Lng32 retcode; CLI_NONPRIV_PROLOGUE(retcode); retcode = SQLCLI_GetCompilerVersion_Internal(GetCliGlobals(),mxcmpVersionToUse, nodeName); return retcode; } // For internal use only -- do not document! // This method returns the CLI diags area in packed format SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetPackedDiagnostics_Internal( /*OUT*/ char * message_buffer_ptr, /*IN*/ ULng32 message_obj_size, /*OUT*/ ULng32 * message_obj_size_needed, /*OUT*/ Lng32 * message_obj_type, /*OUT*/ Lng32 * message_obj_version) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetPackedDiagnostics(GetCliGlobals(), message_buffer_ptr, message_obj_size, message_obj_size_needed, message_obj_type, message_obj_version); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } // For internal use only -- do not document! SQLCLI_LIB_FUNC void SQL_EXEC_SetParserFlagsForExSqlComp_Internal(ULng32 flagbits) { SQL_EXEC_SetParserFlagsForExSqlComp_Internal2(flagbits); } // For internal use only -- do not document! SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetParserFlagsForExSqlComp_Internal2(ULng32 flagbits) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetParserFlagsForExSqlComp_Internal(GetCliGlobals(), flagbits); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } Lng32 SQL_EXEC_AssignParserFlagsForExSqlComp_Internal(ULng32 flagbits) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_AssignParserFlagsForExSqlComp_Internal(GetCliGlobals(), flagbits); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } Lng32 SQL_EXEC_GetParserFlagsForExSqlComp_Internal(ULng32 &flagbits) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetParserFlagsForExSqlComp_Internal(GetCliGlobals(), flagbits); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } // For internal use only -- do not document! SQLCLI_LIB_FUNC void SQL_EXEC_ResetParserFlagsForExSqlComp_Internal(ULng32 flagbits) { SQL_EXEC_ResetParserFlagsForExSqlComp_Internal2(flagbits); } // For internal use only -- do not document! SQLCLI_LIB_FUNC Lng32 SQL_EXEC_ResetParserFlagsForExSqlComp_Internal2(ULng32 flagbits) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_ResetParserFlagsForExSqlComp_Internal(GetCliGlobals(), flagbits); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } Lng32 SQL_EXEC_DeleteHbaseJNI() { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); HBaseClient_JNI::deleteInstance(); HiveClient_JNI::deleteInstance(); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_START // For internal use only -- do not document! SQLCLI_LIB_FUNC const char *const *const SQL_EXEC_GetListOfVolumes_Internal() { return SQLCLI_GetListOfVolumes_Internal(); } // SQL_EXEC_GetListOfVolumes_Internal() // For internal use only -- do not document! SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetListOfAuditedVolumes_Internal( /*INOUT*/ char **volNames, /*INOUT*/ Lng32 *numOfVols) { Lng32 retcode=0; retcode = SQLCLI_GetListOfAuditedVolumes_Internal (GetCliGlobals(), volNames, numOfVols); return retcode; } // SQL_EXEC_GetListOfAuditedVolumes_Internal() // For internal use only -- do not document! SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetNumOfQualifyingVolumes_Internal (/*IN*/ const char *nodeName, /*INOUT*/ Lng32 *numOfVols) { Lng32 retcode=0; retcode = SQLCLI_GetNumOfQualifyingVolumes_Internal (GetCliGlobals(), nodeName, numOfVols); return retcode; } // SQL_EXEC_GetListOfAuditedVolumes_Internal() // For internal use only -- do not document! SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetListOfQualifyingVolumes_Internal (/*IN*/ const char *nodeName, /*IN*/ Lng32 numOfVols, /*OUT*/ char **volNames, /*OUT*/ Lng32 *cpuNums, /*OUT*/ Lng32 *capacities, /*OUT*/ Lng32 *freespaces, /*OUT*/ Lng32 *largestFragments) { Lng32 retcode=0; retcode = SQLCLI_GetListOfQualifyingVolumes_Internal (GetCliGlobals(), nodeName, numOfVols, volNames, cpuNums, capacities, freespaces, largestFragments); return retcode; } // SQL_EXEC_GetListOfAuditedVolumes_Internal() // For internal use only -- do not document! 
SQLCLI_LIB_FUNC short SQL_EXEC_GetDefaultVolume_Internal(char *const outBuf, const short outBufMaxLen, short &defaultVolLen) { short retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE_SHORT(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetDefaultVolume_Internal(outBuf, outBufMaxLen, defaultVolLen); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } // SQL_EXEC_GetDefaultVolume_Internal() //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_IsVolumeUseful_Internal (const char *const volName, short &fsErr) { const Int32 noOfItems = 3; short itemList[noOfItems] = {30, 31, 33}; short resultList[noOfItems]; if (SQLCLI_IsVolume7Chars_Internal(volName)) { fsErr = FEBADNAME; return FALSE; } MS_Mon_Process_Info_Type info; char dp2Name[100]; // including NULL terminator size_t dp2NameLen = 0; char * ptr = NULL; if (volName[0] == '\\') { size_t sysNameLen = 0; // volume qualified with node name - we don't really like that // except if the node name is \NSK. Go check if that is the case. ptr = str_chr(volName, '.'); if (ptr == NULL || (sysNameLen = ptr - volName) != 4 /* strlen("\\NSK") */) { // name is invalid, bail out fsErr = XZFIL_ERR_BADNAME; return FALSE; } else { char sysName[10]; str_cpy_all(sysName, volName, sysNameLen); sysName[sysNameLen] = '\0'; for (Int32 i = 0; i < sysNameLen; i++) sysName[i] = TOUPPER(sysName[i]); if (str_cmp(sysName, "\\NSK", sysNameLen) != 0) { // system name can only be \NSK (case insensitive) fsErr = XZFIL_ERR_BADNAME; return FALSE; } } ptr++; // advance past the period (dot) separator dp2NameLen = str_len(ptr); str_cpy_all(dp2Name, ptr, dp2NameLen); dp2Name[dp2NameLen] = '\0'; } else { // volName[0] should be '$' dp2NameLen = str_len(volName); str_cpy_all(dp2Name, volName, dp2NameLen); dp2Name[dp2NameLen] = '\0'; } fsErr = msg_mon_get_process_info_detail(dp2Name, // in - vol name w/out sys name part &info); // out - process information if (fsErr == XZFIL_ERR_OK) { // massage the outcome to resemble the one from FILE_GETINFOLISTBYNAME_ switch (info.type) { case MS_ProcessType_TSE: resultList[0] = 3; // a disk resultList[1] = 0; // not a fancy SQL/MP disk type resultList[2] = 1; // TSE volume - Always audited break; case MS_ProcessType_ASE: resultList[0] = 3; // a disk resultList[1] = 0; // not a fancy SQL/MP disk type resultList[2] = 0; // ASE Audit Trail volume - Not audited break; default: // Not a DP2 volume fsErr = XZFIL_ERR_NOSUCHDEV; break; } // switch } else if (fsErr == XZFIL_ERR_BOUNDSERR) { // -- process (name) not found -- // Replace this unfriendly fs error condition // with the one we are familiar with fsErr = XZFIL_ERR_NOSUCHDEV; } // Note that FEOK (used on Windows NT and NSK platforms) // and XZERR_FIL_OK are the same if (fsErr != FEOK) // Some file system error, assume that the device // is not a useful volume return FALSE; if (resultList[0] != 3) { // not a disk ... 
fsErr = FEINVALOP; return FALSE; } if ((resultList[1] == 36) || (resultList[1] == 56)) { // an SMF volume or an OSF fsErr = FEINVALOP; return FALSE; } if (resultList[2] == 0) { // not TMF volume fsErr = FEAUDITINVALOP; return FALSE; } // Volume is physical, available and audited return TRUE; } // SQL_EXEC_IsVolumeUseful_Internal() // For internal use only -- do not document! // // returns pointer pointing to the Tandem System Volume name // (NULL-terminated) cached in the Executor dynamic global // memory area. Returns NULL if cannot get the name (after // logging an error message in the Windows NT Even log). // // On NT make a call to this routine to get a pointer to the SMD location // in the CLI globals // // On NSK the CLI globals are hidden so we need the CLI to copy the SMD location // to a local variable. In this case allocate storage for SMDLocation and // make a call to SQL_EXEC_GetSystemVolume_InternalNSK(SMDLocation); //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetSystemVolume_Internal( /*INOUT*/ char * SMDlocation) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; Lng32 fsError; // unexposed error reporting. try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetSystemVolume_Internal(GetCliGlobals(), SMDlocation, &fsError); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_STOP // For internal use only -- do not document! // // returns pointer pointing to the Tandem System Volume name // (NULL-terminated). // SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetRemoteSystemVolume_Internal( /*INOUT*/ char * SMDlocation, /*IN*/ const char *nodeName, /*INOUT*/ Lng32 *fsError) { Lng32 retcode = 0; if ((nodeName == NULL) || (strcmp("NSK", nodeName) == 0) || (strcmp("\\NSK", nodeName) == 0)) { retcode = SQLCLI_GetSystemVolume_Internal(GetCliGlobals(), SMDlocation, fsError); } else { // Notice that there is no need to define the function // SQLCLI_GetRemoteSystemVolume_Internal for NT. // We do want to return sensibly looking error information, though. SMDlocation[0] = '\0'; retcode = -EXE_NAME_MAPPING_FS_ERROR; *fsError = FENOSUCHSYS; } return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetTotalTcbSpace(char *tdb, char * otherInfo) { Lng32 retcode = 0; retcode = SQLCLI_GetTotalTcbSpace(GetCliGlobals(), tdb, otherInfo); return retcode; } //******************************************************************** //* //* putenvCLI(const char * envStr) //* //* This function is being made available due to a specific //* request from ODBC. It should not be used by any other component //* since the results may be unknown. //* //* Technicalese : The component in which ODBC sets env. variables //* is a DLL. Due to some NT quirck, those env. //* variables are not visible in tdm_sqlcli.dll. //* This function would ensure that the environment //* is set in tdm_sqlcli.dll and will always be //* available. //* //* Futures : When the executor group implements the API for //* setting ContextOptions, then this function would //* not be required and should be removed from here. //* //* NOTE : The header for this function is NOT being made //* available via SQLCLIdev.h to prevent additional //* components from using it. 
//******************************************************************** #ifdef __cplusplus extern "C" { #endif //LCOV_EXCL_START SQLCLI_LIB_FUNC Int32 putenvCLI (const char * envStr) { return -1; } //LCOV_EXCL_STOP #ifdef __cplusplus } #endif // for now include this and build it here // #include "CliDll.cpp" SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetCollectStatsType_Internal ( /*OUT*/ ULng32 * collectStatsType, /*IN*/ SQLSTMT_ID * statement_id) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); retcode = SQLCLI_GetCollectStatsType_Internal(GetCliGlobals(), collectStatsType, statement_id); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_BreakEnabled_Internal ( /*IN*/ UInt32 enabled) { Lng32 retcode = 0; retcode = SQLCLI_BreakEnabled (GetCliGlobals(), enabled); return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SPBreakReceived_Internal ( /*OUT*/ UInt32 *breakRecvd) { Lng32 retcode = 0; *breakRecvd = 0; return retcode; } //LCOV_EXCL_STOP // For internal use only -- do not document! SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetEnviron_Internal(Lng32 propagate) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; #ifndef CLI_PRIV_SRL try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetEnviron_Internal(GetCliGlobals(), environ, propagate); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); #else // this method should not be called from inside of the priv srl. retcode = -CLI_INTERNAL_ERROR; #endif return retcode; } //LCOV_EXCL_START SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetVersion_Internal (/*IN*/ Lng32 versionType, /*OUT*/ Lng32 * versionValue, /*IN OPTIONAL*/ const char * nodeName, /*IN OPTIONAL*/ const SQLMODULE_ID * module_name, /*IN OPTIONAL*/ const SQLSTMT_ID * statement_id) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetVersion_Internal(GetCliGlobals(), versionType, versionValue, nodeName, module_name, statement_id); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } //LCOV_EXCL_STOP #ifdef __cplusplus extern "C" { #endif //LCOV_EXCL_START // For internal use only -- do not document! 
SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SETENVIRON_INTERNAL(Lng32 propagate) { return SQL_EXEC_SetEnviron_Internal(propagate); } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DecodeAndFormatKey( /*IN*/void * RCB_Pointer_Addr, /*IN*/void * KeyAddr, /*IN*/Int32 KeyLength, /*INOUT*/void * DecodedKeyBufAddr, /*INOUT*/void * FormattedKeyBufAddr, /*IN*/Int32 FormattedKeyBufLen, /*OUT*/Int32 * NeededKeyBufLen ) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetPartitionKeyFromRow( /*IN*/void * RCB_Pointer_Addr, /*IN*/void * Row_Addr, /*IN*/Int32 Row_Length, /*INOUT*/void * KeyAddr, /*IN*/Int32 KeyLength) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; RecordError(NULL, retcode); return retcode; } //LCOV_EXCL_STOP SQLCLI_LIB_FUNC Lng32 SQL_EXEC_LocaleToUTF8 ( /*IN*/Int32 conv_charset, /*IN*/void * Input_Buffer_Addr, /*IN*/Int32 Input_Buffer_Length, /*IN/OUT*/void * Output_Buffer_Addr, /*IN*/Int32 Output_Buffer_Length, /*OUT*/void ** First_Untranslated_Char_Addr, /*OUT*/Int32 * Output_Data_Length, /*IN*/Int32 add_null_at_end_Flag, /*OUT*/Int32 * num_translated_char) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); retcode = SQLCLI_LocaleToUTF8(GetCliGlobals(), conv_charset, Input_Buffer_Addr, Input_Buffer_Length, Output_Buffer_Addr, Output_Buffer_Length, First_Untranslated_Char_Addr, Output_Data_Length, add_null_at_end_Flag, num_translated_char); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_UTF8ToLocale( /*IN*/Int32 conv_charset, /*IN*/void * Input_Buffer_Addr, /*IN*/Int32 Input_Buffer_Length, /*IN/OUT*/void * Output_Buffer_Addr, /*IN*/Int32 Output_Buffer_Length, /*OUT*/void ** First_Untranslated_Char_Addr, /*OUT*/Int32 * Output_Data_Length, /*IN*/Int32 add_null_at_end_Flag, /*IN*/Int32 allow_invalids, /*OUT*/Int32 * num_translated_char, /*IN*/void * substitution_char_addr) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); retcode = SQLCLI_UTF8ToLocale(GetCliGlobals(), conv_charset, Input_Buffer_Addr, Input_Buffer_Length, Output_Buffer_Addr, Output_Buffer_Length, First_Untranslated_Char_Addr, Output_Data_Length, add_null_at_end_Flag, allow_invalids, num_translated_char,substitution_char_addr); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_LocaleToUTF16 ( /*IN*/Int32 conv_charset, /*IN*/void * Input_Buffer_Addr, /*IN*/Int32 Input_Buffer_Length, /*IN/OUT*/void * Output_Buffer_Addr, /*IN*/Int32 Output_Buffer_Length, /*OUT*/void ** First_Untranslated_Char_Addr, /*OUT*/Int32 * Output_Data_Length, /*IN*/Int32 conv_flags, /*IN*/Int32 add_null_at_end_Flag, /*OUT*/Int32 * num_translated_char) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); retcode = SQLCLI_LocaleToUTF16(GetCliGlobals(), conv_charset, Input_Buffer_Addr, Input_Buffer_Length, Output_Buffer_Addr, Output_Buffer_Length, First_Untranslated_Char_Addr, Output_Data_Length, conv_flags, add_null_at_end_Flag, num_translated_char); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_UTF16ToLocale( /*IN*/Int32 conv_charset, /*IN*/void * Input_Buffer_Addr, /*IN*/Int32 Input_Buffer_Length, /*IN/OUT*/void * Output_Buffer_Addr, /*IN*/Int32 Output_Buffer_Length, /*OUT*/void ** First_Untranslated_Char_Addr, /*OUT*/Int32 * Output_Data_Length, /*IN*/Int32 conv_flags, /*IN*/Int32 add_null_at_end_Flag, /*IN*/Int32 allow_invalids, /*OUT*/Int32 * num_translated_char, /*IN*/void * 
substitution_char_addr) { Lng32 retcode = 0; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); retcode = SQLCLI_UTF16ToLocale(GetCliGlobals(), conv_charset, Input_Buffer_Addr, Input_Buffer_Length, Output_Buffer_Addr, Output_Buffer_Length, First_Untranslated_Char_Addr, Output_Data_Length, conv_flags, add_null_at_end_Flag, allow_invalids, num_translated_char, substitution_char_addr); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetErrorCodeInRTS( /*IN*/ SQLSTMT_ID * statement_id, /*IN*/ Lng32 sqlErrorCode) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetErrorCodeInRTS(GetCliGlobals(), statement_id, sqlErrorCode); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_SetSecInvalidKeys( /* IN */ Int32 numSiKeys, /* IN */ SQL_QIKEY siKeys[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SetSecInvalidKeys(GetCliGlobals(), numSiKeys, siKeys); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetSecInvalidKeys( /* IN */ Int64 prevTimestamp, /* IN/OUT */ SQL_QIKEY siKeys[], /* IN */ Int32 maxNumSiKeys, /* IN/OUT */ Int32 *returnedNumSiKeys, /* IN/OUT */ Int64 *maxTimestamp) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetSecInvalidKeys(GetCliGlobals(), prevTimestamp, siKeys, maxNumSiKeys, returnedNumSiKeys, maxTimestamp); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetStatistics2( /* IN */ short statsReqType, /* IN */ char *statsReqStr, /* IN */ Lng32 statsReqStrLen, /* IN */ short activeQueryNum, /* IN */ short statsMergeType, /* OUT */ short *statsCollectType, /* IN/OUT */ SQLSTATS_DESC sqlStats_desc[], /* IN */ Lng32 max_stats_desc, /* OUT */ Lng32 *no_returned_stats_desc) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetStatistics2(GetCliGlobals(), statsReqType, statsReqStr, statsReqStrLen, activeQueryNum, statsMergeType, statsCollectType, sqlStats_desc, max_stats_desc, no_returned_stats_desc); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetStatisticsItems( /* IN */ short statsReqType, /* IN */ char *queryId, /* IN */ Lng32 queryIdLen, /* IN */ Lng32 no_of_stats_items, /* IN/OUT */ SQLSTATS_ITEM sqlstats_items[]) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetStatisticsItems(GetCliGlobals(), statsReqType, queryId, queryIdLen, no_of_stats_items, sqlstats_items ); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_RegisterQuery(SQLQUERY_ID *queryId, Lng32 fragId, Lng32 tdbId, Lng32 explainTdbId, short collectStatsType, Lng32 instNum, Lng32 tdbType, char *tdbName, Lng32 tdbNameLen) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_RegisterQuery(GetCliGlobals(), queryId, fragId, tdbId, explainTdbId, collectStatsType, instNum, tdbType, tdbName, tdbNameLen); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_DeregisterQuery(SQLQUERY_ID *queryId, Lng32 fragId) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_DeregisterQuery(GetCliGlobals(), queryId, fragId); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } // This method returns the pointer to the CLI ExStatistics area. // The returned pointer is a read only pointer, its contents cannot be // modified by the caller. Lng32 SQL_EXEC_GetStatisticsArea_Internal ( /* IN */ short statsReqType, /* IN */ char *statsReqStr, /* IN */ Lng32 statsReqStrLen, /* IN */ short activeQueryNum, /* IN */ short statsMergeType, /*INOUT*/ const ExStatisticsArea* &exStatsArea) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetStatisticsArea_Internal(GetCliGlobals(), statsReqType, statsReqStr, statsReqStrLen, activeQueryNum, statsMergeType, exStatsArea); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetChildQueryInfo( /*IN*/ SQLSTMT_ID * statement_id, /*INOUT*/ char * uniqueQueryId, /*IN */ Lng32 uniqueQueryIdMaxLen, /*INOUT*/ Lng32 * uniqueQueryIdLen, /*INOUT*/ SQL_QUERY_COST_INFO *query_cost_info, /*INOUT*/ SQL_QUERY_COMPILER_STATS_INFO *comp_stats_info) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetChildQueryInfo(GetCliGlobals(), statement_id, uniqueQueryId, uniqueQueryIdMaxLen, uniqueQueryIdLen, query_cost_info, comp_stats_info ); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_LOBcliInterface ( /*IN*/ char * inLobHandle, /*IN*/ Lng32 inLobHandleLen, /*IN*/ char * blackBox, /*IN*/ Int32* blackBoxLen, /*OUT*/ char * outLobHandle, /*OUT*/ Lng32 * outLobHandleLen, /*IN*/ LOBcliQueryType qType, /*IN*/ LOBcliQueryPhase qPhase, /*INOUT*/ Int64 * dataOffset, /* IN: for insert, OUT: for select */ /*INOUT*/ Int64 * dataLen, /* length of data. IN: for insert, out: for select */ /*OUT*/ Int64 * outDescPartnKey, /* returned after insert and select */ /*OUT*/ Int64 * outDescSyskey, /*INOUT*/ void* *inCliInterface, /*IN*/ Int64 xnId /* xn id of the parent process, if non-zero */ ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_LOBcliInterface(GetCliGlobals(), inLobHandle, inLobHandleLen, blackBox, blackBoxLen, outLobHandle, outLobHandleLen, qType, qPhase, dataOffset, dataLen, outDescPartnKey, outDescSyskey, inCliInterface, xnId); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Lng32 SQL_EXEC_LOB_GC_Interface ( /*IN*/ void *lobGlobals, // can be passed or NULL /*IN*/ char * handle, /*IN*/ Lng32 handleLen, /*IN*/ char* hdfsServer, /*IN*/ Lng32 hdfsPort, /*IN*/ char *lobLocation, /*IN*/ Int64 lobMaxMemChunkLen // if passed in as 0, will use default value of 1G for the in memory buffer to do compaction. ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_LOB_GC_Interface(GetCliGlobals(), lobGlobals, handle, handleLen, hdfsServer, hdfsPort,lobLocation, lobMaxMemChunkLen); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Lng32 SQL_EXEC_LOBddlInterface ( /*IN*/ char * schName, /*IN*/ Lng32 schNameLen, /*IN*/ Int64 objectUID, /*IN*/ Lng32 &numLOBs, /*IN*/ LOBcliQueryType qType, /*IN*/ short *lobNumList, /*IN*/ short *lobTypList, /*IN*/ char* *lobLocList, /*IN*/ char *hdfsServer, /*IN*/ Int32 hdfsPort, /*IN */ Int64 lobMaxSize ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_LOBddlInterface(GetCliGlobals(), schName, schNameLen, objectUID, numLOBs, qType, lobNumList, lobTypList, lobLocList, hdfsServer, hdfsPort, lobMaxSize); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Lng32 SQL_EXEC_LOBloader2sqlInterface ( /*IN*/ char * lobHandle, /*IN*/ Lng32 lobHandleLen, /*IN*/ char * lobInfo, /*IN*/ Lng32 lobInfoLen, /*IN*/ LOBcliQueryType qType, /*INOUT*/ char * dataLoc, /* IN: for load, OUT: for extract */ /*INOUT*/ Int64 &dataLen, /* length of data. 0 indicates EOD */ /*INOUT*/ void* *cliInterface /* INOUT: if returned, save it and pass it back in on the next call */ ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_LOBloader2sqlInterface(GetCliGlobals(), lobHandle, lobHandleLen, lobInfo, lobInfoLen, qType, dataLoc, dataLen, cliInterface); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Int32 SQL_EXEC_SWITCH_TO_COMPILER_TYPE ( /*IN*/ Int32 cmpCntxtType ) { Int32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SWITCH_TO_COMPILER_TYPE(GetCliGlobals(), cmpCntxtType); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Int32 SQL_EXEC_SWITCH_TO_COMPILER ( /*IN*/ void * cmpCntxt ) { Int32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SWITCH_TO_COMPILER(GetCliGlobals(), cmpCntxt); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Int32 SQL_EXEC_SWITCH_BACK_COMPILER ( ) { Int32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SWITCH_BACK_COMPILER(GetCliGlobals()); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Lng32 SQL_EXEC_SEcliInterface ( SECliQueryType qType, void* *cliInterface, const char * inStrParam1, const char * inStrParam2, int inIntParam1, int inIntParam2, char* *outStrParam1, char* *outStrParam2, Lng32 *outIntParam1 ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SEcliInterface(GetCliGlobals(), qType, cliInterface, inStrParam1, inStrParam2, inIntParam1, inIntParam2, outStrParam1, outStrParam2, outIntParam1); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } Lng32 SQL_EXEC_SeqGenCliInterface ( void* *cliInterface, void * seqGenAttrs ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_SeqGenCliInterface(GetCliGlobals(), cliInterface, seqGenAttrs); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_GetRoutine ( /* IN */ const char *serializedInvocationInfo, /* IN */ Int32 invocationInfoLen, /* IN */ const char *serializedPlanInfo, /* IN */ Int32 planInfoLen, /* IN */ Int32 language, /* IN */ Int32 paramStyle, /* IN */ const char *externalName, /* IN */ const char *containerName, /* IN */ const char *externalPath, /* IN */ const char *librarySqlName, /* OUT */ Int32 *handle ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetRoutine( GetCliGlobals(), serializedInvocationInfo, invocationInfoLen, serializedPlanInfo, planInfoLen, language, paramStyle, externalName, containerName, externalPath, librarySqlName, handle); } catch(...) 
{ retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Int32 SQL_EXEC_InvokeRoutine ( /* IN */ Int32 handle, /* IN */ Int32 phaseEnumAsInt, /* IN */ const char *serializedInvocationInfo, /* IN */ Int32 invocationInfoLen, /* OUT */ Int32 *invocationInfoLenOut, /* IN */ const char *serializedPlanInfo, /* IN */ Int32 planInfoLen, /* IN */ Int32 planNum, /* OUT */ Int32 *planInfoLenOut, /* IN */ char *inputRow, /* IN */ Int32 inputRowLen, /* OUT */ char *outputRow, /* IN */ Int32 outputRowLen ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_InvokeRoutine( GetCliGlobals(), handle, phaseEnumAsInt, serializedInvocationInfo, invocationInfoLen, invocationInfoLenOut, serializedPlanInfo, planInfoLen, planNum, planInfoLenOut, inputRow, inputRowLen, outputRow, outputRowLen); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Int32 SQL_EXEC_GetRoutineInvocationInfo ( /* IN */ Int32 handle, /* IN/OUT */ char *serializedInvocationInfo, /* IN */ Int32 invocationInfoMaxLen, /* OUT */ Int32 *invocationInfoLenOut, /* IN/OUT */ char *serializedPlanInfo, /* IN */ Int32 planInfoMaxLen, /* IN */ Int32 planNum, /* OUT */ Int32 *planInfoLenOut ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_GetRoutineInvocationInfo( GetCliGlobals(), handle, serializedInvocationInfo, invocationInfoMaxLen, invocationInfoLenOut, serializedPlanInfo, planInfoMaxLen, planNum, planInfoLenOut); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } SQLCLI_LIB_FUNC Lng32 SQL_EXEC_PutRoutine ( /* IN */ Int32 handle ) { Lng32 retcode; CLISemaphore *tmpSemaphore; ContextCli *threadContext; CLI_NONPRIV_PROLOGUE(retcode); try { tmpSemaphore = getCliSemaphore(threadContext); tmpSemaphore->get(); threadContext->incrNumOfCliCalls(); retcode = SQLCLI_PutRoutine(GetCliGlobals(), handle); } catch(...) { retcode = -CLI_INTERNAL_ERROR; #if defined(_THROW_EXCEPTIONS) if (cliWillThrow()) { threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); throw; } #endif } threadContext->decrNumOfCliCalls(); tmpSemaphore->release(); RecordError(NULL, retcode); return retcode; } #ifdef __cplusplus } #endif
idx: 1
id: 11,031
msg: Is there any security issue here? If we integrate with Hive security (and I don't know whether we do), is there some notion of re-authentication at connection time?
proj: apache-trafodion
lang: cpp
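Every SQL_EXEC_* wrapper in the file above repeats the same bracketing by hand: acquire the CLI semaphore, increment the per-context call count, invoke the SQLCLI_* implementation, then decrement and release on every exit path, with the decrement/release pair duplicated once more inside each catch block. Note too that if getCliSemaphore() or get() itself threw, the cleanup code would touch tmpSemaphore and threadContext before they were ever assigned. Below is a hypothetical RAII sketch of how that duplication could be factored out; CliCallGuard is an invented name, while CLISemaphore, ContextCli, and getCliSemaphore come from the code above.

// Hypothetical refactoring sketch, not Trafodion source. The guard pairs
// acquire/increment with decrement/release in a destructor, so every exit
// path (normal return or throw) is covered once instead of being duplicated
// in each catch block.
class CliCallGuard
{
public:
  explicit CliCallGuard(ContextCli *&threadContext)
  {
    sema_ = getCliSemaphore(threadContext);
    sema_->get();                      // enter the CLI critical section
    ctx_ = threadContext;
    ctx_->incrNumOfCliCalls();
  }

  ~CliCallGuard()                      // runs on normal return and on throw
  {
    ctx_->decrNumOfCliCalls();
    sema_->release();
  }

  CliCallGuard(const CliCallGuard &) = delete;
  CliCallGuard &operator=(const CliCallGuard &) = delete;

private:
  CLISemaphore *sema_;
  ContextCli   *ctx_;
};

// A wrapper such as SQL_EXEC_Xact would then reduce to roughly:
//
//   Lng32 retcode;
//   CLI_NONPRIV_PROLOGUE(retcode);
//   ContextCli *threadContext = NULL;
//   CliCallGuard guard(threadContext);
//   retcode = SQLCLI_Xact(GetCliGlobals(), command, transid_descriptor);
//   return retcode;

The try/catch that maps unexpected exceptions to -CLI_INTERNAL_ERROR (and the conditional cliWillThrow() rethrow) would remain where needed; only the repeated cleanup moves into the destructor.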
@@ -22,7 +22,7 @@ const illegalCommandFields = [
   'raw',
   'readPreference',
   'session',
-  'writeConcern'
+  'readConcern'
 ];
 
 class CreateCollectionOperation extends CommandOperation {
y: 1
'use strict'; const Aspect = require('./operation').Aspect; const defineAspects = require('./operation').defineAspects; const CommandOperation = require('./command'); const applyWriteConcern = require('../utils').applyWriteConcern; const handleCallback = require('../utils').handleCallback; const loadCollection = require('../dynamic_loaders').loadCollection; const MongoError = require('../core').MongoError; const ReadPreference = require('../core').ReadPreference; // Filter out any write concern options const illegalCommandFields = [ 'w', 'wtimeout', 'j', 'fsync', 'autoIndexId', 'strict', 'serializeFunctions', 'pkFactory', 'raw', 'readPreference', 'session', 'writeConcern' ]; class CreateCollectionOperation extends CommandOperation { constructor(db, name, options) { super(db, options); this.name = name; } _buildCommand() { const name = this.name; const options = this.options; // Create collection command const cmd = { create: name }; // Add all optional parameters for (let n in options) { if ( options[n] != null && typeof options[n] !== 'function' && illegalCommandFields.indexOf(n) === -1 ) { cmd[n] = options[n]; } } return cmd; } execute(callback) { const db = this.db; const name = this.name; const options = this.options; let Collection = loadCollection(); // Did the user destroy the topology if (db.serverConfig && db.serverConfig.isDestroyed()) { return callback(new MongoError('topology was destroyed')); } let listCollectionOptions = Object.assign({}, options, { nameOnly: true }); listCollectionOptions = applyWriteConcern(listCollectionOptions, { db }, listCollectionOptions); // Check if we have the name db .listCollections({ name }, listCollectionOptions) .setReadPreference(ReadPreference.PRIMARY) .toArray((err, collections) => { if (err != null) return handleCallback(callback, err, null); if (collections.length > 0 && listCollectionOptions.strict) { return handleCallback( callback, MongoError.create({ message: `Collection ${name} already exists. Currently in strict mode.`, driver: true }), null ); } else if (collections.length > 0) { try { return handleCallback( callback, null, new Collection(db, db.s.topology, db.databaseName, name, db.s.pkFactory, options) ); } catch (err) { return handleCallback(callback, err); } } // Execute command super.execute(err => { if (err) return handleCallback(callback, err); try { return handleCallback( callback, null, new Collection(db, db.s.topology, db.databaseName, name, db.s.pkFactory, options) ); } catch (err) { return handleCallback(callback, err); } }); }); } } defineAspects(CreateCollectionOperation, Aspect.WRITE_OPERATION); module.exports = CreateCollectionOperation;
idx: 1
id: 15,657
msg: Does this mean we do not support writeConcern on `createCollection`?
proj: mongodb-node-mongodb-native
lang: js
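The question above is readily checked against _buildCommand: the create command is built by copying every option whose name is not in illegalCommandFields. A minimal, self-contained sketch of that filter (not driver source; option values are illustrative only) shows the effect of the patch, which drops 'writeConcern' from the list and adds 'readConcern':

'use strict';

// The post-patch filter list: writeConcern is no longer stripped.
const illegalCommandFields = ['raw', 'readPreference', 'session', 'readConcern'];

function buildCreateCommand(name, options) {
  const cmd = { create: name };
  // Copy every non-null, non-function option not on the illegal list.
  for (const n of Object.keys(options)) {
    if (
      options[n] != null &&
      typeof options[n] !== 'function' &&
      illegalCommandFields.indexOf(n) === -1
    ) {
      cmd[n] = options[n];
    }
  }
  return cmd;
}

console.log(
  buildCreateCommand('orders', {
    writeConcern: { w: 'majority' }, // kept after the patch
    readConcern: { level: 'local' } // stripped after the patch
  })
);
// => { create: 'orders', writeConcern: { w: 'majority' } }

So after the patch a writeConcern option survives the filter and is sent with the create command, which suggests the change enables rather than removes writeConcern support here.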
@@ -36,7 +36,7 @@ func TestProducer_RequestSessionDestroy(t *testing.T) {
 	assert.NoError(t, err)
 
 	destroySender := &fakeDestroySender{}
-	err = RequestSessionDestroy(destroySender, sid)
+	err = RequestSessionDestroy(destroySender, sid.ID)
 	assert.NoError(t, err)
 }
y: 1
/* * Copyright (C) 2018 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package session import ( "testing" "github.com/mysteriumnetwork/node/communication" "github.com/stretchr/testify/assert" ) var ( successfulSessionDestroyResponse = &DestroyResponse{ Success: true, } ) func TestProducer_RequestSessionDestroy(t *testing.T) { sender := &fakeSender{} sid, _, err := RequestSessionCreate(sender, 123, []byte{}, ConsumerInfo{}) assert.NoError(t, err) destroySender := &fakeDestroySender{} err = RequestSessionDestroy(destroySender, sid) assert.NoError(t, err) } type fakeDestroySender struct { } func (sender *fakeDestroySender) Send(producer communication.MessageProducer) error { return nil } func (sender *fakeDestroySender) Request(producer communication.RequestProducer) (responsePtr interface{}, err error) { return successfulSessionDestroyResponse, nil }
idx: 1
id: 13,588
msg: I guess `sid` was short for `sessionID`, but now it reads strangely: `sid.ID`.
proj: mysteriumnetwork-node
lang: go
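A hypothetical, self-contained sketch of the rename the comment suggests: if RequestSessionCreate now returns a session value rather than a bare identifier, naming the variable session keeps session.ID readable, whereas sid.ID effectively reads as "sessionID.ID". The types below are stand-ins, not the package's real definitions.

package main

import "fmt"

// ID is a stand-in for the session identifier type.
type ID string

// Session is a stand-in for whatever RequestSessionCreate now returns.
type Session struct {
	ID ID
}

// requestSessionDestroy is a stand-in for RequestSessionDestroy.
func requestSessionDestroy(id ID) error {
	fmt.Println("destroying session", id)
	return nil
}

func main() {
	// was: sid, _, err := RequestSessionCreate(...)
	session := Session{ID: "abc-123"}
	// `session.ID` reads naturally where `sid.ID` did not.
	if err := requestSessionDestroy(session.ID); err != nil {
		fmt.Println(err)
	}
}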
@@ -379,7 +379,11 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (
 			Protocol:    infrav1.SecurityGroupProtocolTCP,
 			FromPort:    6443,
 			ToPort:      6443,
-			CidrBlocks:  []string{anyIPv4CidrBlock},
+			SourceSecurityGroupIDs: []string{
+				s.scope.SecurityGroups()[infrav1.SecurityGroupAPIServerLB].ID,
+				s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID,
+				s.scope.SecurityGroups()[infrav1.SecurityGroupNode].ID,
+			},
 		},
 		{
 			Description: "etcd",
y: 1
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ec2 import ( "fmt" errlist "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/pkg/record" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors" ) const ( // IPProtocolTCP is how EC2 represents the TCP protocol in ingress rules IPProtocolTCP = "tcp" // IPProtocolUDP is how EC2 represents the UDP protocol in ingress rules IPProtocolUDP = "udp" // IPProtocolICMP is how EC2 represents the ICMP protocol in ingress rules IPProtocolICMP = "icmp" // IPProtocolICMPv6 is how EC2 represents the ICMPv6 protocol in ingress rules IPProtocolICMPv6 = "58" ) func (s *Service) reconcileSecurityGroups() error { s.scope.V(2).Info("Reconciling security groups") if s.scope.Network().SecurityGroups == nil { s.scope.Network().SecurityGroups = make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup) } sgs, err := s.describeSecurityGroupsByName() if err != nil { return err } // Declare all security group roles that the reconcile loop takes care of. roles := []infrav1.SecurityGroupRole{ infrav1.SecurityGroupBastion, infrav1.SecurityGroupAPIServerLB, infrav1.SecurityGroupLB, infrav1.SecurityGroupControlPlane, infrav1.SecurityGroupNode, } // First iteration makes sure that the security group are valid and fully created. for _, role := range roles { sg := s.getDefaultSecurityGroup(role) existing, ok := sgs[*sg.GroupName] if !ok { if err := s.createSecurityGroup(role, sg); err != nil { return err } s.scope.SecurityGroups()[role] = infrav1.SecurityGroup{ ID: *sg.GroupId, Name: *sg.GroupName, } s.scope.V(2).Info("Created security group for role", "role", role, "security-group", s.scope.SecurityGroups()[role]) continue } // TODO(vincepri): validate / update security group if necessary. s.scope.SecurityGroups()[role] = existing // Make sure tags are up to date. if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { if err := tags.Ensure(existing.Tags, &tags.ApplyParams{ EC2Client: s.scope.EC2, BuildParams: s.getSecurityGroupTagParams(existing.Name, existing.ID, role), }); err != nil { return false, err } return true, nil }, awserrors.GroupNotFound); err != nil { return errors.Wrapf(err, "failed to ensure tags on security group %q", existing.ID) } } // Second iteration creates or updates all permissions on the security group to match // the specified ingress rules. 
for role, sg := range s.scope.SecurityGroups() { if sg.Tags.HasAWSCloudProviderOwned(s.scope.Name()) { // skip rule reconciliation, as we expect the in-cluster cloud integration to manage them continue } current := sg.IngressRules want, err := s.getSecurityGroupIngressRules(role) if err != nil { return err } toRevoke := current.Difference(want) if len(toRevoke) > 0 { if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { if err := s.revokeSecurityGroupIngressRules(sg.ID, toRevoke); err != nil { return false, err } return true, nil }, awserrors.GroupNotFound); err != nil { return errors.Wrapf(err, "failed to revoke security group ingress rules for %q", sg.ID) } s.scope.V(2).Info("Revoked ingress rules from security group", "revoked-ingress-rules", toRevoke, "security-group-id", sg.ID) } toAuthorize := want.Difference(current) if len(toAuthorize) > 0 { if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { if err := s.authorizeSecurityGroupIngressRules(sg.ID, toAuthorize); err != nil { return false, err } return true, nil }, awserrors.GroupNotFound); err != nil { return err } s.scope.V(2).Info("Authorized ingress rules in security group", "authorized-ingress-rules", toAuthorize, "security-group-id", sg.ID) } } return nil } func (s *Service) deleteSecurityGroups() error { for _, sg := range s.scope.SecurityGroups() { current := sg.IngressRules if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { return err } s.scope.V(2).Info("Revoked ingress rules from security group", "revoked-ingress-rules", current, "security-group-id", sg.ID) } for _, sg := range s.scope.SecurityGroups() { s.deleteSecurityGroup(&sg, "managed") } clusterGroups, err := s.describeClusterOwnedSecurityGroups() if err != nil { return err } errs := []error{} for _, sg := range clusterGroups { if err := s.deleteSecurityGroup(&sg, "cluster managed"); err != nil { errs = append(errs, err) } } if len(errs) != 0 { return errlist.NewAggregate(errs) } return nil } func (s *Service) deleteSecurityGroup(sg *infrav1.SecurityGroup, typ string) error { input := &ec2.DeleteSecurityGroupInput{ GroupId: aws.String(sg.ID), } if _, err := s.scope.EC2.DeleteSecurityGroup(input); awserrors.IsIgnorableSecurityGroupError(err) != nil { record.Warnf(s.scope.AWSCluster, "FailedDeleteSecurityGroup", "Failed to delete %s SecurityGroup %q: %v", typ, sg.ID, err) return errors.Wrapf(err, "failed to delete security group %q", sg.ID) } record.Eventf(s.scope.AWSCluster, "SuccessfulDeleteSecurityGroup", "Deleted %s SecurityGroup %q", typ, sg.ID) s.scope.V(2).Info("Deleted security group", "security-group-id", sg.ID, "kind", typ) return nil } func (s *Service) describeClusterOwnedSecurityGroups() ([]infrav1.SecurityGroup, error) { input := &ec2.DescribeSecurityGroupsInput{ Filters: []*ec2.Filter{ filter.EC2.VPC(s.scope.VPC().ID), filter.EC2.ProviderOwned(s.scope.Name()), }, } groups := []infrav1.SecurityGroup{} err := s.scope.EC2.DescribeSecurityGroupsPages(input, func(out *ec2.DescribeSecurityGroupsOutput, last bool) bool { for _, group := range out.SecurityGroups { if group != nil { groups = append(groups, makeInfraSecurityGroup(group)) } } return true }) if err != nil { return nil, errors.Wrapf(err, "failed to describe cluster-owned security groups in vpc %q", s.scope.VPC().ID) } return groups, nil } func (s *Service) describeSecurityGroupsByName() (map[string]infrav1.SecurityGroup, error) { input := &ec2.DescribeSecurityGroupsInput{ Filters: []*ec2.Filter{ 
filter.EC2.VPC(s.scope.VPC().ID), filter.EC2.Cluster(s.scope.Name()), }, } out, err := s.scope.EC2.DescribeSecurityGroups(input) if err != nil { return nil, errors.Wrapf(err, "failed to describe security groups in vpc %q", s.scope.VPC().ID) } res := make(map[string]infrav1.SecurityGroup, len(out.SecurityGroups)) for _, ec2sg := range out.SecurityGroups { sg := makeInfraSecurityGroup(ec2sg) for _, ec2rule := range ec2sg.IpPermissions { sg.IngressRules = append(sg.IngressRules, ingressRuleFromSDKType(ec2rule)) } res[sg.Name] = sg } return res, nil } func makeInfraSecurityGroup(ec2sg *ec2.SecurityGroup) infrav1.SecurityGroup { return infrav1.SecurityGroup{ ID: *ec2sg.GroupId, Name: *ec2sg.GroupName, Tags: converters.TagsToMap(ec2sg.Tags), } } func (s *Service) createSecurityGroup(role infrav1.SecurityGroupRole, input *ec2.SecurityGroup) error { out, err := s.scope.EC2.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{ VpcId: input.VpcId, GroupName: input.GroupName, Description: aws.String(fmt.Sprintf("Kubernetes cluster %s: %s", s.scope.Name(), role)), }) if err != nil { record.Warnf(s.scope.AWSCluster, "FailedCreateSecurityGroup", "Failed to create managed SecurityGroup for Role %q: %v", role, err) return errors.Wrapf(err, "failed to create security group %q in vpc %q", *out.GroupId, *input.VpcId) } record.Eventf(s.scope.AWSCluster, "SuccessfulCreateSecurityGroup", "Created managed SecurityGroup %q for Role %q", *out.GroupId, role) // Set the group id. input.GroupId = out.GroupId // Tag the security group. if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { if _, err := s.scope.EC2.CreateTags(&ec2.CreateTagsInput{ Resources: []*string{out.GroupId}, Tags: input.Tags, }); err != nil { return false, err } return true, nil }, awserrors.GroupNotFound); err != nil { record.Warnf(s.scope.AWSCluster, "FailedTagSecurityGroup", "Failed to tag managed SecurityGroup %q: %v", *out.GroupId, err) return errors.Wrapf(err, "failed to tag security group %q in vpc %q", *out.GroupId, *input.VpcId) } record.Eventf(s.scope.AWSCluster, "SuccessfulTagSecurityGroup", "Tagged managed SecurityGroup %q", *out.GroupId) return nil } func (s *Service) authorizeSecurityGroupIngressRules(id string, rules infrav1.IngressRules) error { input := &ec2.AuthorizeSecurityGroupIngressInput{GroupId: aws.String(id)} for _, rule := range rules { input.IpPermissions = append(input.IpPermissions, ingressRuleToSDKType(rule)) } if _, err := s.scope.EC2.AuthorizeSecurityGroupIngress(input); err != nil { record.Warnf(s.scope.AWSCluster, "FailedAuthorizeSecurityGroupIngressRules", "Failed to authorize security group ingress rules %v for SecurityGroup %q: %v", rules, id, err) return errors.Wrapf(err, "failed to authorize security group %q ingress rules: %v", id, rules) } record.Eventf(s.scope.AWSCluster, "SuccessfulAuthorizeSecurityGroupIngressRules", "Authorized security group ingress rules %v for SecurityGroup %q", rules, id) return nil } func (s *Service) revokeSecurityGroupIngressRules(id string, rules infrav1.IngressRules) error { input := &ec2.RevokeSecurityGroupIngressInput{GroupId: aws.String(id)} for _, rule := range rules { input.IpPermissions = append(input.IpPermissions, ingressRuleToSDKType(rule)) } if _, err := s.scope.EC2.RevokeSecurityGroupIngress(input); err != nil { record.Warnf(s.scope.AWSCluster, "FailedRevokeSecurityGroupIngressRules", "Failed to revoke security group ingress rules %v for SecurityGroup %q: %v", rules, id, err) return errors.Wrapf(err, "failed to revoke security group %q ingress 
rules: %v", id, rules) } record.Eventf(s.scope.AWSCluster, "SuccessfulRevokeSecurityGroupIngressRules", "Revoked security group ingress rules %v for SecurityGroup %q", rules, id) return nil } func (s *Service) revokeAllSecurityGroupIngressRules(id string) error { describeInput := &ec2.DescribeSecurityGroupsInput{GroupIds: []*string{aws.String(id)}} securityGroups, err := s.scope.EC2.DescribeSecurityGroups(describeInput) if err != nil { return errors.Wrapf(err, "failed to query security group %q", id) } for _, sg := range securityGroups.SecurityGroups { if len(sg.IpPermissions) > 0 { revokeInput := &ec2.RevokeSecurityGroupIngressInput{ GroupId: aws.String(id), IpPermissions: sg.IpPermissions, } if _, err := s.scope.EC2.RevokeSecurityGroupIngress(revokeInput); err != nil { record.Warnf(s.scope.AWSCluster, "FailedRevokeSecurityGroupIngressRules", "Failed to revoke all security group ingress rules for SecurityGroup %q: %v", *sg.GroupId, err) return errors.Wrapf(err, "failed to revoke security group %q ingress rules", id) } record.Eventf(s.scope.AWSCluster, "SuccessfulRevokeSecurityGroupIngressRules", "Revoked all security group ingress rules for SecurityGroup %q", *sg.GroupId) } } return nil } func (s *Service) defaultSSHIngressRule(sourceSecurityGroupID string) *infrav1.IngressRule { return &infrav1.IngressRule{ Description: "SSH", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 22, ToPort: 22, SourceSecurityGroupIDs: []string{sourceSecurityGroupID}, } } func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (infrav1.IngressRules, error) { switch role { case infrav1.SecurityGroupBastion: return infrav1.IngressRules{ { Description: "SSH", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 22, ToPort: 22, CidrBlocks: []string{anyIPv4CidrBlock}, }, }, nil case infrav1.SecurityGroupControlPlane: return infrav1.IngressRules{ s.defaultSSHIngressRule(s.scope.SecurityGroups()[infrav1.SecurityGroupBastion].ID), { Description: "Kubernetes API", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 6443, ToPort: 6443, CidrBlocks: []string{anyIPv4CidrBlock}, }, { Description: "etcd", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 2379, ToPort: 2379, SourceSecurityGroupIDs: []string{s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID}, }, { Description: "etcd peer", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 2380, ToPort: 2380, SourceSecurityGroupIDs: []string{s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID}, }, { Description: "bgp (calico)", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 179, ToPort: 179, SourceSecurityGroupIDs: []string{ s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID, s.scope.SecurityGroups()[infrav1.SecurityGroupNode].ID, }, }, { Description: "IP-in-IP (calico)", Protocol: infrav1.SecurityGroupProtocolIPinIP, FromPort: -1, ToPort: 65535, SourceSecurityGroupIDs: []string{ s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID, s.scope.SecurityGroups()[infrav1.SecurityGroupNode].ID, }, }, }, nil case infrav1.SecurityGroupNode: return infrav1.IngressRules{ s.defaultSSHIngressRule(s.scope.SecurityGroups()[infrav1.SecurityGroupBastion].ID), { Description: "Node Port Services", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 30000, ToPort: 32767, CidrBlocks: []string{anyIPv4CidrBlock}, }, { Description: "Kubelet API", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 10250, ToPort: 10250, SourceSecurityGroupIDs: []string{ 
s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID, // This is needed to support metrics-server deployments s.scope.SecurityGroups()[infrav1.SecurityGroupNode].ID, }, }, { Description: "bgp (calico)", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: 179, ToPort: 179, SourceSecurityGroupIDs: []string{ s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID, s.scope.SecurityGroups()[infrav1.SecurityGroupNode].ID, }, }, { Description: "IP-in-IP (calico)", Protocol: infrav1.SecurityGroupProtocolIPinIP, FromPort: -1, ToPort: 65535, SourceSecurityGroupIDs: []string{ s.scope.SecurityGroups()[infrav1.SecurityGroupNode].ID, s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID, }, }, }, nil case infrav1.SecurityGroupAPIServerLB: return infrav1.IngressRules{ { Description: "Kubernetes API", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: int64(s.scope.APIServerPort()), ToPort: int64(s.scope.APIServerPort()), CidrBlocks: []string{anyIPv4CidrBlock}, }, }, nil case infrav1.SecurityGroupLB: // We hand this group off to the in-cluster cloud provider, so these rules aren't used return infrav1.IngressRules{}, nil } return nil, errors.Errorf("Cannot determine ingress rules for unknown security group role %q", role) } func (s *Service) getSecurityGroupName(clusterName string, role infrav1.SecurityGroupRole) string { return fmt.Sprintf("%s-%v", clusterName, role) } func (s *Service) getDefaultSecurityGroup(role infrav1.SecurityGroupRole) *ec2.SecurityGroup { name := s.getSecurityGroupName(s.scope.Name(), role) return &ec2.SecurityGroup{ GroupName: aws.String(name), VpcId: aws.String(s.scope.VPC().ID), Tags: converters.MapToTags(infrav1.Build(s.getSecurityGroupTagParams(name, "", role))), } } func (s *Service) getSecurityGroupTagParams(name string, id string, role infrav1.SecurityGroupRole) infrav1.BuildParams { additional := s.scope.AdditionalTags() if role == infrav1.SecurityGroupLB { additional[infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())] = string(infrav1.ResourceLifecycleOwned) } return infrav1.BuildParams{ ClusterName: s.scope.Name(), Lifecycle: infrav1.ResourceLifecycleOwned, Name: aws.String(name), ResourceID: id, Role: aws.String(string(role)), Additional: additional, } } func ingressRuleToSDKType(i *infrav1.IngressRule) (res *ec2.IpPermission) { // AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but // we avoid serializing it out for clarity's sake. 
// See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html switch i.Protocol { case infrav1.SecurityGroupProtocolTCP, infrav1.SecurityGroupProtocolUDP, infrav1.SecurityGroupProtocolICMP, infrav1.SecurityGroupProtocolICMPv6: res = &ec2.IpPermission{ IpProtocol: aws.String(string(i.Protocol)), FromPort: aws.Int64(i.FromPort), ToPort: aws.Int64(i.ToPort), } default: res = &ec2.IpPermission{ IpProtocol: aws.String(string(i.Protocol)), } } for _, cidr := range i.CidrBlocks { ipRange := &ec2.IpRange{ CidrIp: aws.String(cidr), } if i.Description != "" { ipRange.Description = aws.String(i.Description) } res.IpRanges = append(res.IpRanges, ipRange) } for _, groupID := range i.SourceSecurityGroupIDs { userIDGroupPair := &ec2.UserIdGroupPair{ GroupId: aws.String(groupID), } if i.Description != "" { userIDGroupPair.Description = aws.String(i.Description) } res.UserIdGroupPairs = append(res.UserIdGroupPairs, userIDGroupPair) } return res } func ingressRuleFromSDKType(v *ec2.IpPermission) (res *infrav1.IngressRule) { // Ports are only well-defined for TCP and UDP protocols, but EC2 overloads the port range // in the case of ICMP(v6) traffic to indicate which codes are allowed. For all other protocols, // including the custom "-1" All Traffic protocol, FromPort and ToPort are omitted from the response. // See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html switch *v.IpProtocol { case IPProtocolTCP, IPProtocolUDP, IPProtocolICMP, IPProtocolICMPv6: res = &infrav1.IngressRule{ Protocol: infrav1.SecurityGroupProtocol(*v.IpProtocol), FromPort: *v.FromPort, ToPort: *v.ToPort, } default: res = &infrav1.IngressRule{ Protocol: infrav1.SecurityGroupProtocol(*v.IpProtocol), } } for _, ec2range := range v.IpRanges { if ec2range.Description != nil && *ec2range.Description != "" { res.Description = *ec2range.Description } res.CidrBlocks = append(res.CidrBlocks, *ec2range.CidrIp) } for _, pair := range v.UserIdGroupPairs { if pair.GroupId == nil { continue } if pair.Description != nil && *pair.Description != "" { res.Description = *pair.Description } res.SourceSecurityGroupIDs = append(res.SourceSecurityGroupIDs, *pair.GroupId) } return res }
1
13129
We'll need `infrav1.SecurityGroupBastion` as well
kubernetes-sigs-cluster-api-provider-aws
go
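The review comment above asks for the bastion role to be included as well. A minimal sketch of what that might look like, assuming the change under review enumerates the managed roles in a slice (the variable name `roles` and its exact contents are assumptions, since the diff itself is not part of this record; `infrav1` is imported as in the file above, and all of the role constants appear in it):

// Hypothetical enumeration of the managed security-group roles; the
// reviewer's point is that infrav1.SecurityGroupBastion must appear here too.
var roles = []infrav1.SecurityGroupRole{
    infrav1.SecurityGroupBastion, // the role the reviewer says is missing
    infrav1.SecurityGroupAPIServerLB,
    infrav1.SecurityGroupLB,
    infrav1.SecurityGroupControlPlane,
    infrav1.SecurityGroupNode,
}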
@@ -1470,6 +1470,15 @@ func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) ( return dst, nil } +// PublicLink adds a "readable by anyone with link" permission on the given file or folder. +func PublicLink(f fs.Fs, fileName string) (string, error) { + doPublicLink := f.Features().PublicLink + if doPublicLink == nil { + return "", errors.Errorf("%v doesn't support public links", f) + } + return doPublicLink(fileName) +} + // Rmdirs removes any empty directories (or directories only // containing empty directories) under f, including f. func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error {
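For context, a minimal sketch of how the helper added in the patch above might be called. Only `operations.PublicLink` comes from the patch; the wrapper function, its name, and the idea that `f` was constructed elsewhere are assumptions for illustration:

package main

import (
    "fmt"
    "log"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/operations"
)

// printLink is a hypothetical caller of the new PublicLink helper; f is
// assumed to have been created elsewhere (e.g. from a remote name).
func printLink(f fs.Fs, fileName string) {
    link, err := operations.PublicLink(f, fileName)
    if err != nil {
        log.Fatal(err) // e.g. the backend doesn't implement public links
    }
    fmt.Println(link)
}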
1
// Package operations does generic operations on filesystems and objects package operations import ( "bytes" "fmt" "io" "io/ioutil" "log" "path" "sort" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/accounting" "github.com/ncw/rclone/fs/config" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/march" "github.com/ncw/rclone/fs/object" "github.com/ncw/rclone/fs/walk" "github.com/ncw/rclone/lib/readers" "github.com/pkg/errors" "github.com/spf13/pflag" "golang.org/x/net/context" ) // CheckHashes checks the two files to see if they have common // known hash types and compares them // // Returns // // equal - which is equality of the hashes // // hash - the HashType. This is HashNone if either of the hashes were // unset or a compatible hash couldn't be found. // // err - may return an error which will already have been logged // // If an error is returned it will return equal as false func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) { common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) // fs.Debugf(nil, "Shared hashes: %v", common) if common.Count() == 0 { return true, hash.None, nil } ht = common.GetOne() srcHash, err := src.Hash(ht) if err != nil { fs.CountError(err) fs.Errorf(src, "Failed to calculate src hash: %v", err) return false, ht, err } if srcHash == "" { return true, hash.None, nil } dstHash, err := dst.Hash(ht) if err != nil { fs.CountError(err) fs.Errorf(dst, "Failed to calculate dst hash: %v", err) return false, ht, err } if dstHash == "" { return true, hash.None, nil } if srcHash != dstHash { fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs()) fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs()) } return srcHash == dstHash, ht, nil } // Equal checks to see if the src and dst objects are equal by looking at // size, mtime and hash // // If the src and dst size are different then it is considered to be // not equal. If --size-only is in effect then this is the only check // that is done. If --ignore-size is in effect then this check is // skipped and the files are considered the same size. // // If the size is the same and the mtime is the same then it is // considered to be equal. This check is skipped if using --checksum. // // If the size is the same and mtime is different, unreadable or // --checksum is set and the hash is the same then the file is // considered to be equal. In this case the mtime on the dst is // updated if --checksum is not set. // // Otherwise the file is considered to be not equal including if there // were errors reading info. 
func Equal(src fs.ObjectInfo, dst fs.Object) bool { return equal(src, dst, fs.Config.SizeOnly, fs.Config.CheckSum) } // sizeDiffers compares the size of src and dst taking into account the // various ways of ignoring sizes func sizeDiffers(src, dst fs.ObjectInfo) bool { if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 { return false } return src.Size() != dst.Size() } func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { if sizeDiffers(src, dst) { fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) return false } if sizeOnly { fs.Debugf(src, "Sizes identical") return true } // Assert: Size is equal or being ignored // If checking checksum and not modtime if checkSum { // Check the hash same, ht, _ := CheckHashes(src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false } if ht == hash.None { fs.Debugf(src, "Size of src and dst objects identical") } else { fs.Debugf(src, "Size and %v of src and dst objects identical", ht) } return true } // Sizes the same so check the mtime if fs.Config.ModifyWindow == fs.ModTimeNotSupported { fs.Debugf(src, "Sizes identical") return true } srcModTime := src.ModTime() dstModTime := dst.ModTime() dt := dstModTime.Sub(srcModTime) ModifyWindow := fs.Config.ModifyWindow if dt < ModifyWindow && dt > -ModifyWindow { fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow) return true } fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime) // Check if the hashes are the same same, ht, _ := CheckHashes(src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false } if ht == hash.None { // if couldn't check hash, return that they differ return false } // mod time differs but hash is the same, so reset mod time if required if !fs.Config.NoUpdateModTime { if fs.Config.DryRun { fs.Logf(src, "Not updating modification time as --dry-run") } else { // Size and hash the same but mtime different // Error if objects are treated as immutable if fs.Config.Immutable { fs.Errorf(dst, "Timestamp mismatch between immutable objects") return false } // Update the mtime of the dst object here err := dst.SetModTime(srcModTime) if err == fs.ErrorCantSetModTime { fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading") return false } else if err == fs.ErrorCantSetModTimeWithoutDelete { fs.Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading") // Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file // put in the BackupDir than deleted which is what will happen if we don't delete it. 
if fs.Config.BackupDir == "" { err = dst.Remove() if err != nil { fs.Errorf(dst, "failed to delete before re-upload: %v", err) } } return false } else if err != nil { fs.CountError(err) fs.Errorf(dst, "Failed to set modification time: %v", err) } else { fs.Infof(src, "Updated modification time in destination") } } } return true } // Used to remove a failed copy // // Returns whether the file was successfully removed or not func removeFailedCopy(dst fs.Object) bool { if dst == nil { return false } fs.Infof(dst, "Removing failed copy") removeErr := dst.Remove() if removeErr != nil { fs.Infof(dst, "Failed to remove failed copy: %s", removeErr) return false } return true } // Wrapper to override the remote for an object type overrideRemoteObject struct { fs.Object remote string } // Remote returns the overridden remote name func (o *overrideRemoteObject) Remote() string { return o.remote } // MimeType returns the mime type of the underlying object or "" if it // can't be worked out func (o *overrideRemoteObject) MimeType() string { if do, ok := o.Object.(fs.MimeTyper); ok { return do.MimeType() } return "" } // Check interface is satisfied var _ fs.MimeTyper = (*overrideRemoteObject)(nil) // Copy src object to dst or f if nil. If dst is nil then it uses // remote as the name of the new object. // // It returns the destination object if possible. Note that this may // be nil. func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { newDst = dst if fs.Config.DryRun { fs.Logf(src, "Not copying as --dry-run") return newDst, nil } maxTries := fs.Config.LowLevelRetries tries := 0 doUpdate := dst != nil // work out which hash to use - limit to 1 hash in common var common hash.Set hashType := hash.None if !fs.Config.SizeOnly { common = src.Fs().Hashes().Overlap(f.Hashes()) if common.Count() > 0 { hashType = common.GetOne() common = hash.Set(hashType) } } hashOption := &fs.HashesOption{Hashes: common} var actionTaken string for { // Try server side copy first - if has optional interface and // is same underlying remote actionTaken = "Copied (server side copy)" if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) { newDst, err = doCopy(src, remote) if err == nil { dst = newDst } } else { err = fs.ErrorCantCopy } // If can't server side copy, do it manually if err == fs.ErrorCantCopy { var in0 io.ReadCloser in0, err = src.Open(hashOption) if err != nil { err = errors.Wrap(err, "failed to open source object") } else { in := accounting.NewAccount(in0, src).WithBuffer() // account and buffer the transfer var wrappedSrc fs.ObjectInfo = src // We try to pass the original object if possible if src.Remote() != remote { wrappedSrc = &overrideRemoteObject{Object: src, remote: remote} } if doUpdate { actionTaken = "Copied (replaced existing)" err = dst.Update(in, wrappedSrc, hashOption) } else { actionTaken = "Copied (new)" dst, err = f.Put(in, wrappedSrc, hashOption) } closeErr := in.Close() if err == nil { newDst = dst err = closeErr } } } tries++ if tries >= maxTries { break } // Retry if err returned a retry error if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries) continue } // otherwise finish break } if err != nil { fs.CountError(err) fs.Errorf(src, "Failed to copy: %v", err) return newDst, err } // Verify sizes are the same after transfer if sizeDiffers(src, dst) { err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size()) 
fs.Errorf(dst, "%v", err) fs.CountError(err) removeFailedCopy(dst) return newDst, err } // Verify hashes are the same after transfer - ignoring blank hashes // TODO(klauspost): This could be extended, so we always create a hash type matching // the destination, and calculate it while sending. if hashType != hash.None { var srcSum string srcSum, err = src.Hash(hashType) if err != nil { fs.CountError(err) fs.Errorf(src, "Failed to read src hash: %v", err) } else if srcSum != "" { var dstSum string dstSum, err = dst.Hash(hashType) if err != nil { fs.CountError(err) fs.Errorf(dst, "Failed to read hash: %v", err) } else if !fs.Config.IgnoreChecksum && !hash.Equals(srcSum, dstSum) { err = errors.Errorf("corrupted on transfer: %v hashes differ %q vs %q", hashType, srcSum, dstSum) fs.Errorf(dst, "%v", err) fs.CountError(err) removeFailedCopy(dst) return newDst, err } } } fs.Infof(src, actionTaken) return newDst, err } // Move src object to dst or fdst if nil. If dst is nil then it uses // remote as the name of the new object. // // It returns the destination object if possible. Note that this may // be nil. func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { newDst = dst if fs.Config.DryRun { fs.Logf(src, "Not moving as --dry-run") return newDst, nil } // See if we have Move available if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) { // Delete destination if it exists if dst != nil { err = DeleteFile(dst) if err != nil { return newDst, err } } // Move dst <- src newDst, err = doMove(src, remote) switch err { case nil: fs.Infof(src, "Moved (server side)") return newDst, nil case fs.ErrorCantMove: fs.Debugf(src, "Can't move, switching to copy") default: fs.CountError(err) fs.Errorf(src, "Couldn't move: %v", err) return newDst, err } } // Move not found or didn't work so copy dst <- src newDst, err = Copy(fdst, dst, remote, src) if err != nil { fs.Errorf(src, "Not deleting source as copy failed: %v", err) return newDst, err } // Delete src if no error on copy return newDst, DeleteFile(src) } // CanServerSideMove returns true if fdst supports server side moves or // server side copies // // Some remotes simulate rename by server-side copy and delete, so include // remotes that implement either Mover or Copier. func CanServerSideMove(fdst fs.Fs) bool { canMove := fdst.Features().Move != nil canCopy := fdst.Features().Copy != nil return canMove || canCopy } // DeleteFileWithBackupDir deletes a single file respecting --dry-run // and accumulating stats and errors. 
// // If backupDir is set then it moves the file to there instead of // deleting func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) { accounting.Stats.Checking(dst.Remote()) numDeletes := accounting.Stats.Deletes(1) if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete { return fserrors.FatalError(errors.New("--max-delete threshold reached")) } action, actioned, actioning := "delete", "Deleted", "deleting" if backupDir != nil { action, actioned, actioning = "move into backup dir", "Moved into backup dir", "moving into backup dir" } if fs.Config.DryRun { fs.Logf(dst, "Not %s as --dry-run", actioning) } else if backupDir != nil { if !SameConfig(dst.Fs(), backupDir) { err = errors.New("parameter to --backup-dir has to be on the same remote as destination") } else { remoteWithSuffix := dst.Remote() + fs.Config.Suffix overwritten, _ := backupDir.NewObject(remoteWithSuffix) _, err = Move(backupDir, overwritten, remoteWithSuffix, dst) } } else { err = dst.Remove() } if err != nil { fs.CountError(err) fs.Errorf(dst, "Couldn't %s: %v", action, err) } else if !fs.Config.DryRun { fs.Infof(dst, actioned) } accounting.Stats.DoneChecking(dst.Remote()) return err } // DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors. // // It is a convenience wrapper for DeleteFileWithBackupDir with no backup // directory, so the file is always deleted rather than moved. func DeleteFile(dst fs.Object) (err error) { return DeleteFileWithBackupDir(dst, nil) } // DeleteFilesWithBackupDir removes all the files passed in the // channel // // If backupDir is set the files will be placed into that directory // instead of being deleted. func DeleteFilesWithBackupDir(toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error { var wg sync.WaitGroup wg.Add(fs.Config.Transfers) var errorCount int32 var fatalErrorCount int32 for i := 0; i < fs.Config.Transfers; i++ { go func() { defer wg.Done() for dst := range toBeDeleted { err := DeleteFileWithBackupDir(dst, backupDir) if err != nil { atomic.AddInt32(&errorCount, 1) if fserrors.IsFatalError(err) { fs.Errorf(nil, "Got fatal error on delete: %s", err) atomic.AddInt32(&fatalErrorCount, 1) return } } } }() } fs.Infof(nil, "Waiting for deletions to finish") wg.Wait() if errorCount > 0 { err := errors.Errorf("failed to delete %d files", errorCount) if fatalErrorCount > 0 { return fserrors.FatalError(err) } return err } return nil } // DeleteFiles removes all the files passed in the channel func DeleteFiles(toBeDeleted fs.ObjectsChan) error { return DeleteFilesWithBackupDir(toBeDeleted, nil) } // Reads Objects into add() for the given Fs. // dir is the start directory, "" for root // If includeAll is specified all files will be added, // otherwise only files passing the filter will be added. // // Each object is passed into the function provided. If that returns // an error then the listing will be aborted and that error returned. 
func readFilesFn(f fs.Fs, includeAll bool, dir string, add func(fs.Object) error) (err error) { return walk.Walk(f, "", includeAll, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { return err } return entries.ForObjectError(add) }) } // SameConfig returns true if fdst and fsrc are using the same config // file entry func SameConfig(fdst, fsrc fs.Info) bool { return fdst.Name() == fsrc.Name() } // Same returns true if fdst and fsrc point to the same underlying Fs func Same(fdst, fsrc fs.Info) bool { return SameConfig(fdst, fsrc) && fdst.Root() == fsrc.Root() } // Overlapping returns true if fdst and fsrc point to the same // underlying Fs and they overlap. func Overlapping(fdst, fsrc fs.Info) bool { if !SameConfig(fdst, fsrc) { return false } // Return the Root with a trailing / if not empty fixedRoot := func(f fs.Info) string { s := strings.Trim(f.Root(), "/") if s != "" { s += "/" } return s } fdstRoot := fixedRoot(fdst) fsrcRoot := fixedRoot(fsrc) return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot) } // checkIdentical checks to see if dst and src are identical // // it returns true if differences were found // it also returns whether it couldn't be hashed func checkIdentical(dst, src fs.Object) (differ bool, noHash bool) { same, ht, err := CheckHashes(src, dst) if err != nil { // CheckHashes will log and count errors return true, false } if ht == hash.None { return false, true } if !same { err = errors.Errorf("%v differ", ht) fs.Errorf(src, "%v", err) fs.CountError(err) return true, false } return false, false } // checkFn is the type of the checking function used in CheckFn() type checkFn func(a, b fs.Object) (differ bool, noHash bool) // checkMarch is used to march over two Fses in the same way as // sync/copy type checkMarch struct { fdst, fsrc fs.Fs check checkFn differences int32 noHashes int32 srcFilesMissing int32 dstFilesMissing int32 } // DstOnly handles an object which is in the destination only func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) { switch dst.(type) { case fs.Object: err := errors.Errorf("File not in %v", c.fsrc) fs.Errorf(dst, "%v", err) fs.CountError(err) atomic.AddInt32(&c.differences, 1) atomic.AddInt32(&c.srcFilesMissing, 1) case fs.Directory: // Do the same thing to the entire contents of the directory return true default: panic("Bad object in DirEntries") } return false } // SrcOnly handles an object which is in the source only func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) { switch src.(type) { case fs.Object: err := errors.Errorf("File not in %v", c.fdst) fs.Errorf(src, "%v", err) fs.CountError(err) atomic.AddInt32(&c.differences, 1) atomic.AddInt32(&c.dstFilesMissing, 1) case fs.Directory: // Do the same thing to the entire contents of the directory return true default: panic("Bad object in DirEntries") } return false } // check to see if two objects are identical using the check function func (c *checkMarch) checkIdentical(dst, src fs.Object) (differ bool, noHash bool) { accounting.Stats.Checking(src.Remote()) defer accounting.Stats.DoneChecking(src.Remote()) if sizeDiffers(src, dst) { err := errors.Errorf("Sizes differ") fs.Errorf(src, "%v", err) fs.CountError(err) return true, false } if fs.Config.SizeOnly { return false, false } return c.check(dst, src) } // Match is called when src and dst are present, so sync src to dst func (c *checkMarch) Match(dst, src fs.DirEntry) (recurse bool) { switch srcX := src.(type) { case fs.Object: dstX, ok := 
dst.(fs.Object) if ok { differ, noHash := c.checkIdentical(dstX, srcX) if differ { atomic.AddInt32(&c.differences, 1) } else { fs.Debugf(dstX, "OK") } if noHash { atomic.AddInt32(&c.noHashes, 1) } } else { err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst) fs.Errorf(src, "%v", err) fs.CountError(err) atomic.AddInt32(&c.differences, 1) atomic.AddInt32(&c.dstFilesMissing, 1) } case fs.Directory: // Do the same thing to the entire contents of the directory _, ok := dst.(fs.Directory) if ok { return true } err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc) fs.Errorf(dst, "%v", err) fs.CountError(err) atomic.AddInt32(&c.differences, 1) atomic.AddInt32(&c.srcFilesMissing, 1) default: panic("Bad object in DirEntries") } return false } // CheckFn checks the files in fsrc and fdst according to Size and // hash using checkFunction on each file to check the hashes. // // checkFunction sees if dst and src are identical // // it returns true if differences were found // it also returns whether it couldn't be hashed func CheckFn(fdst, fsrc fs.Fs, check checkFn) error { c := &checkMarch{ fdst: fdst, fsrc: fsrc, check: check, } // set up a march over fdst and fsrc m := march.New(context.Background(), fdst, fsrc, "", c) fs.Infof(fdst, "Waiting for checks to finish") m.Run() if c.dstFilesMissing > 0 { fs.Logf(fdst, "%d files missing", c.dstFilesMissing) } if c.srcFilesMissing > 0 { fs.Logf(fsrc, "%d files missing", c.srcFilesMissing) } fs.Logf(fdst, "%d differences found", accounting.Stats.GetErrors()) if c.noHashes > 0 { fs.Logf(fdst, "%d hashes could not be checked", c.noHashes) } if c.differences > 0 { return errors.Errorf("%d differences found", c.differences) } return nil } // Check the files in fsrc and fdst according to Size and hash func Check(fdst, fsrc fs.Fs) error { return CheckFn(fdst, fsrc, checkIdentical) } // CheckEqualReaders checks to see if in1 and in2 have the same // content when read. // // it returns true if differences were found func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) { const bufSize = 64 * 1024 buf1 := make([]byte, bufSize) buf2 := make([]byte, bufSize) for { n1, err1 := readers.ReadFill(in1, buf1) n2, err2 := readers.ReadFill(in2, buf2) // check errors if err1 != nil && err1 != io.EOF { return true, err1 } else if err2 != nil && err2 != io.EOF { return true, err2 } // err1 && err2 are nil or io.EOF here // process the data if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) { return true, nil } // if both streams finished then we have finished if err1 == io.EOF && err2 == io.EOF { break } } return false, nil } // CheckIdentical checks to see if dst and src are identical by // reading all their bytes if necessary. // // it returns true if differences were found func CheckIdentical(dst, src fs.Object) (differ bool, err error) { in1, err := dst.Open() if err != nil { return true, errors.Wrapf(err, "failed to open %q", dst) } in1 = accounting.NewAccount(in1, dst).WithBuffer() // account and buffer the transfer defer fs.CheckClose(in1, &err) in2, err := src.Open() if err != nil { return true, errors.Wrapf(err, "failed to open %q", src) } in2 = accounting.NewAccount(in2, src).WithBuffer() // account and buffer the transfer defer fs.CheckClose(in2, &err) return CheckEqualReaders(in1, in2) } // CheckDownload checks the files in fsrc and fdst according to Size // and the actual contents of the files. 
func CheckDownload(fdst, fsrc fs.Fs) error { check := func(a, b fs.Object) (differ bool, noHash bool) { differ, err := CheckIdentical(a, b) if err != nil { fs.CountError(err) fs.Errorf(a, "Failed to download: %v", err) return true, true } return differ, false } return CheckFn(fdst, fsrc, check) } // ListFn lists the Fs to the supplied function // // Lists in parallel which may get them out of order func ListFn(f fs.Fs, fn func(fs.Object)) error { return walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { // FIXME count errors and carry on for listing return err } entries.ForObject(fn) return nil }) } // mutex for synchronized output var outMutex sync.Mutex // Synchronized fmt.Fprintf // // Ignores errors from Fprintf func syncFprintf(w io.Writer, format string, a ...interface{}) { outMutex.Lock() defer outMutex.Unlock() _, _ = fmt.Fprintf(w, format, a...) } // List the Fs to the supplied writer // // Shows size and path - obeys includes and excludes // // Lists in parallel which may get them out of order func List(f fs.Fs, w io.Writer) error { return ListFn(f, func(o fs.Object) { syncFprintf(w, "%9d %s\n", o.Size(), o.Remote()) }) } // ListLong lists the Fs to the supplied writer // // Shows size, mod time and path - obeys includes and excludes // // Lists in parallel which may get them out of order func ListLong(f fs.Fs, w io.Writer) error { return ListFn(f, func(o fs.Object) { accounting.Stats.Checking(o.Remote()) modTime := o.ModTime() accounting.Stats.DoneChecking(o.Remote()) syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) }) } // Md5sum lists the Fs to the supplied writer // // Produces the same output as the md5sum command - obeys includes and // excludes // // Lists in parallel which may get them out of order func Md5sum(f fs.Fs, w io.Writer) error { return hashLister(hash.MD5, f, w) } // Sha1sum lists the Fs to the supplied writer // // Obeys includes and excludes // // Lists in parallel which may get them out of order func Sha1sum(f fs.Fs, w io.Writer) error { return hashLister(hash.SHA1, f, w) } // DropboxHashSum lists the Fs to the supplied writer // // Obeys includes and excludes // // Lists in parallel which may get them out of order func DropboxHashSum(f fs.Fs, w io.Writer) error { return hashLister(hash.Dropbox, f, w) } // hashSum returns the human readable hash for ht passed in. This may // be UNSUPPORTED or ERROR. func hashSum(ht hash.Type, o fs.Object) string { accounting.Stats.Checking(o.Remote()) sum, err := o.Hash(ht) accounting.Stats.DoneChecking(o.Remote()) if err == hash.ErrUnsupported { sum = "UNSUPPORTED" } else if err != nil { fs.Debugf(o, "Failed to read %v: %v", ht, err) sum = "ERROR" } return sum } func hashLister(ht hash.Type, f fs.Fs, w io.Writer) error { return ListFn(f, func(o fs.Object) { sum := hashSum(ht, o) syncFprintf(w, "%*s %s\n", hash.Width[ht], sum, o.Remote()) }) } // Count counts the objects and their sizes in the Fs // // Obeys includes and excludes func Count(f fs.Fs) (objects int64, size int64, err error) { err = ListFn(f, func(o fs.Object) { atomic.AddInt64(&objects, 1) atomic.AddInt64(&size, o.Size()) }) return } // ConfigMaxDepth returns the depth to use for a recursive or non-recursive listing. 
func ConfigMaxDepth(recursive bool) int { depth := fs.Config.MaxDepth if !recursive && depth < 0 { depth = 1 } return depth } // ListDir lists the directories/buckets/containers in the Fs to the supplied writer func ListDir(f fs.Fs, w io.Writer) error { return walk.Walk(f, "", false, ConfigMaxDepth(false), func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { // FIXME count errors and carry on for listing return err } entries.ForDir(func(dir fs.Directory) { if dir != nil { syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote()) } }) return nil }) } // Mkdir makes a destination directory or container func Mkdir(f fs.Fs, dir string) error { if fs.Config.DryRun { fs.Logf(fs.LogDirName(f, dir), "Not making directory as dry run is set") return nil } fs.Debugf(fs.LogDirName(f, dir), "Making directory") err := f.Mkdir(dir) if err != nil { fs.CountError(err) return err } return nil } // TryRmdir removes a container but not if not empty. It doesn't // count errors but may return one. func TryRmdir(f fs.Fs, dir string) error { if fs.Config.DryRun { fs.Logf(fs.LogDirName(f, dir), "Not deleting as dry run is set") return nil } fs.Debugf(fs.LogDirName(f, dir), "Removing directory") return f.Rmdir(dir) } // Rmdir removes a container but not if not empty func Rmdir(f fs.Fs, dir string) error { err := TryRmdir(f, dir) if err != nil { fs.CountError(err) return err } return err } // Purge removes a directory and all of its contents func Purge(f fs.Fs, dir string) error { doFallbackPurge := true var err error if dir == "" { // FIXME change the Purge interface so it takes a dir - see #1891 if doPurge := f.Features().Purge; doPurge != nil { doFallbackPurge = false if fs.Config.DryRun { fs.Logf(f, "Not purging as --dry-run set") } else { err = doPurge() if err == fs.ErrorCantPurge { doFallbackPurge = true } } } } if doFallbackPurge { // DeleteFiles and Rmdir observe --dry-run err = DeleteFiles(listToChan(f, dir)) if err != nil { return err } err = Rmdirs(f, "", false) } if err != nil { fs.CountError(err) return err } return nil } // Delete removes all the contents of a container. Unlike Purge, it // obeys includes and excludes. 
func Delete(f fs.Fs) error { delete := make(fs.ObjectsChan, fs.Config.Transfers) delErr := make(chan error, 1) go func() { delErr <- DeleteFiles(delete) }() err := ListFn(f, func(o fs.Object) { delete <- o }) close(delete) delError := <-delErr if err == nil { err = delError } return err } // dedupeRename renames the objs slice to different names func dedupeRename(remote string, objs []fs.Object) { f := objs[0].Fs() doMove := f.Features().Move if doMove == nil { log.Fatalf("Fs %v doesn't support Move", f) } ext := path.Ext(remote) base := remote[:len(remote)-len(ext)] for i, o := range objs { newName := fmt.Sprintf("%s-%d%s", base, i+1, ext) if !fs.Config.DryRun { newObj, err := doMove(o, newName) if err != nil { fs.CountError(err) fs.Errorf(o, "Failed to rename: %v", err) continue } fs.Infof(newObj, "renamed from: %v", o) } else { fs.Logf(remote, "Not renaming to %q as --dry-run", newName) } } } // dedupeDeleteAllButOne deletes all but the one in keep func dedupeDeleteAllButOne(keep int, remote string, objs []fs.Object) { for i, o := range objs { if i == keep { continue } _ = DeleteFile(o) } fs.Logf(remote, "Deleted %d extra copies", len(objs)-1) } // dedupeDeleteIdentical deletes all but one of identical (by hash) copies func dedupeDeleteIdentical(remote string, objs []fs.Object) []fs.Object { // See how many of these duplicates are identical byHash := make(map[string][]fs.Object, len(objs)) for _, o := range objs { md5sum, err := o.Hash(hash.MD5) if err == nil { byHash[md5sum] = append(byHash[md5sum], o) } } // Delete identical duplicates, refilling obj with the ones remaining objs = nil for md5sum, hashObjs := range byHash { if len(hashObjs) > 1 { fs.Logf(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum) for _, o := range hashObjs[1:] { _ = DeleteFile(o) } } objs = append(objs, hashObjs[0]) } return objs } // dedupeInteractive interactively dedupes the slice of objects func dedupeInteractive(remote string, objs []fs.Object) { fmt.Printf("%s: %d duplicates remain\n", remote, len(objs)) for i, o := range objs { md5sum, err := o.Hash(hash.MD5) if err != nil { md5sum = err.Error() } fmt.Printf(" %d: %12d bytes, %s, md5sum %32s\n", i+1, o.Size(), o.ModTime().Format("2006-01-02 15:04:05.000000000"), md5sum) } switch config.Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) { case 's': case 'k': keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs)) dedupeDeleteAllButOne(keep-1, remote, objs) case 'r': dedupeRename(remote, objs) } } type objectsSortedByModTime []fs.Object func (objs objectsSortedByModTime) Len() int { return len(objs) } func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] } func (objs objectsSortedByModTime) Less(i, j int) bool { return objs[i].ModTime().Before(objs[j].ModTime()) } // DeduplicateMode is how the dedupe command chooses what to do type DeduplicateMode int // Deduplicate modes const ( DeduplicateInteractive DeduplicateMode = iota // interactively ask the user DeduplicateSkip // skip all conflicts DeduplicateFirst // choose the first object DeduplicateNewest // choose the newest object DeduplicateOldest // choose the oldest object DeduplicateRename // rename the objects ) func (x DeduplicateMode) String() string { switch x { case DeduplicateInteractive: return "interactive" case DeduplicateSkip: return "skip" case DeduplicateFirst: return "first" case 
DeduplicateNewest: return "newest" case DeduplicateOldest: return "oldest" case DeduplicateRename: return "rename" } return "unknown" } // Set a DeduplicateMode from a string func (x *DeduplicateMode) Set(s string) error { switch strings.ToLower(s) { case "interactive": *x = DeduplicateInteractive case "skip": *x = DeduplicateSkip case "first": *x = DeduplicateFirst case "newest": *x = DeduplicateNewest case "oldest": *x = DeduplicateOldest case "rename": *x = DeduplicateRename default: return errors.Errorf("Unknown mode for dedupe %q.", s) } return nil } // Type of the value func (x *DeduplicateMode) Type() string { return "string" } // Check it satisfies the interface var _ pflag.Value = (*DeduplicateMode)(nil) // dedupeFindDuplicateDirs scans f for duplicate directories func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) { duplicateDirs := [][]fs.Directory{} err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { return err } dirs := map[string][]fs.Directory{} entries.ForDir(func(d fs.Directory) { dirs[d.Remote()] = append(dirs[d.Remote()], d) }) for _, ds := range dirs { if len(ds) > 1 { duplicateDirs = append(duplicateDirs, ds) } } return nil }) if err != nil { return nil, errors.Wrap(err, "find duplicate dirs") } return duplicateDirs, nil } // dedupeMergeDuplicateDirs merges all the duplicate directories found func dedupeMergeDuplicateDirs(f fs.Fs, duplicateDirs [][]fs.Directory) error { mergeDirs := f.Features().MergeDirs if mergeDirs == nil { return errors.Errorf("%v: can't merge directories", f) } dirCacheFlush := f.Features().DirCacheFlush if dirCacheFlush == nil { return errors.Errorf("%v: can't flush dir cache", f) } for _, dirs := range duplicateDirs { if !fs.Config.DryRun { fs.Infof(dirs[0], "Merging contents of duplicate directories") err := mergeDirs(dirs) if err != nil { return errors.Wrap(err, "merge duplicate dirs") } } else { fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run") } } dirCacheFlush() return nil } // Deduplicate interactively finds duplicate files and offers to // delete all but one or rename them to be different. Only useful with // Google Drive which can have duplicate file names. 
func Deduplicate(f fs.Fs, mode DeduplicateMode) error { fs.Infof(f, "Looking for duplicates using %v mode.", mode) // Find duplicate directories first and fix them - repeat // until all fixed for { duplicateDirs, err := dedupeFindDuplicateDirs(f) if err != nil { return err } if len(duplicateDirs) == 0 { break } err = dedupeMergeDuplicateDirs(f, duplicateDirs) if err != nil { return err } if fs.Config.DryRun { break } } // Now find duplicate files files := map[string][]fs.Object{} err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { return err } entries.ForObject(func(o fs.Object) { remote := o.Remote() files[remote] = append(files[remote], o) }) return nil }) if err != nil { return err } for remote, objs := range files { if len(objs) > 1 { fs.Logf(remote, "Found %d duplicates - deleting identical copies", len(objs)) objs = dedupeDeleteIdentical(remote, objs) if len(objs) <= 1 { fs.Logf(remote, "All duplicates removed") continue } switch mode { case DeduplicateInteractive: dedupeInteractive(remote, objs) case DeduplicateFirst: dedupeDeleteAllButOne(0, remote, objs) case DeduplicateNewest: sort.Sort(objectsSortedByModTime(objs)) // sort oldest first dedupeDeleteAllButOne(len(objs)-1, remote, objs) case DeduplicateOldest: sort.Sort(objectsSortedByModTime(objs)) // sort oldest first dedupeDeleteAllButOne(0, remote, objs) case DeduplicateRename: dedupeRename(remote, objs) case DeduplicateSkip: // skip default: //skip } } } return nil } // listToChan will transfer all objects in the listing to the output // // If an error occurs, the error will be logged, and it will close the // channel. // // If the error was ErrorDirNotFound then it will be ignored func listToChan(f fs.Fs, dir string) fs.ObjectsChan { o := make(fs.ObjectsChan, fs.Config.Checkers) go func() { defer close(o) _ = walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { if err == fs.ErrorDirNotFound { return nil } err = errors.Errorf("Failed to list: %v", err) fs.CountError(err) fs.Errorf(nil, "%v", err) return nil } entries.ForObject(func(obj fs.Object) { o <- obj }) return nil }) }() return o } // CleanUp removes the trash for the Fs func CleanUp(f fs.Fs) error { doCleanUp := f.Features().CleanUp if doCleanUp == nil { return errors.Errorf("%v doesn't support cleanup", f) } if fs.Config.DryRun { fs.Logf(f, "Not running cleanup as --dry-run set") return nil } return doCleanUp() } // wrap a Reader and a Closer together into a ReadCloser type readCloser struct { io.Reader io.Closer } // Cat any files to the io.Writer // // if offset == 0 it will be ignored // if offset > 0 then the file will be seeked to that offset // if offset < 0 then the file will be seeked that far from the end // // if count < 0 then it will be ignored // if count >= 0 then only that many characters will be output func Cat(f fs.Fs, w io.Writer, offset, count int64) error { var mu sync.Mutex return ListFn(f, func(o fs.Object) { var err error accounting.Stats.Transferring(o.Remote()) defer func() { accounting.Stats.DoneTransferring(o.Remote(), err == nil) }() opt := fs.RangeOption{Start: offset, End: -1} size := o.Size() if opt.Start < 0 { opt.Start += size } if count >= 0 { opt.End = opt.Start + count - 1 } var options []fs.OpenOption if opt.Start > 0 || opt.End >= 0 { options = append(options, &opt) } in, err := o.Open(options...) 
if err != nil { fs.CountError(err) fs.Errorf(o, "Failed to open: %v", err) return } if count >= 0 { in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in} // reduce remaining size to count if size > count { size = count } } in = accounting.NewAccountSizeName(in, size, o.Remote()).WithBuffer() // account and buffer the transfer defer func() { err = in.Close() if err != nil { fs.CountError(err) fs.Errorf(o, "Failed to close: %v", err) } }() // take the lock just before we output stuff, so at the last possible moment mu.Lock() defer mu.Unlock() _, err = io.Copy(w, in) if err != nil { fs.CountError(err) fs.Errorf(o, "Failed to send to output: %v", err) } }) } // Rcat reads data from the Reader until EOF and uploads it to a file on remote func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) { accounting.Stats.Transferring(dstFileName) in = accounting.NewAccountSizeName(in, -1, dstFileName).WithBuffer() defer func() { accounting.Stats.DoneTransferring(dstFileName, err == nil) if otherErr := in.Close(); otherErr != nil { fs.Debugf(fdst, "Rcat: failed to close source: %v", otherErr) } }() hashOption := &fs.HashesOption{Hashes: fdst.Hashes()} hash, err := hash.NewMultiHasherTypes(fdst.Hashes()) if err != nil { return nil, err } readCounter := readers.NewCountingReader(in) trackingIn := io.TeeReader(readCounter, hash) compare := func(dst fs.Object) error { src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst) if !Equal(src, dst) { err = errors.Errorf("corrupted on transfer") fs.CountError(err) fs.Errorf(dst, "%v", err) return err } return nil } // check if file small enough for direct upload buf := make([]byte, fs.Config.StreamingUploadCutoff) if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF { fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n) src := object.NewMemoryObject(dstFileName, modTime, buf[:n]) return Copy(fdst, nil, dstFileName, src) } // Make a new ReadCloser with the bits we've already read in = &readCloser{ Reader: io.MultiReader(bytes.NewReader(buf), trackingIn), Closer: in, } fStreamTo := fdst canStream := fdst.Features().PutStream != nil if !canStream { fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file") tmpLocalFs, err := fs.TemporaryLocalFs() if err != nil { return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file") } defer func() { err := Purge(tmpLocalFs, "") if err != nil { fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err) } }() fStreamTo = tmpLocalFs } if fs.Config.DryRun { fs.Logf("stdin", "Not uploading as --dry-run") // prevents "broken pipe" errors _, err = io.Copy(ioutil.Discard, in) return nil, err } objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil) if dst, err = fStreamTo.Features().PutStream(in, objInfo, hashOption); err != nil { return dst, err } if err = compare(dst); err != nil { return dst, err } if !canStream { // copy dst (which is the local object we have just streamed to) to the remote return Copy(fdst, nil, dstFileName, dst) } return dst, nil } // Rmdirs removes any empty directories (or directories only // containing empty directories) under f, including f. 
func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error { dirEmpty := make(map[string]bool) dirEmpty[""] = !leaveRoot err := walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { fs.CountError(err) fs.Errorf(f, "Failed to list %q: %v", dirPath, err) return nil } for _, entry := range entries { switch x := entry.(type) { case fs.Directory: // add a new directory as empty dir := x.Remote() _, found := dirEmpty[dir] if !found { dirEmpty[dir] = true } case fs.Object: // mark the parents of the file as being non-empty dir := x.Remote() for dir != "" { dir = path.Dir(dir) if dir == "." || dir == "/" { dir = "" } empty, found := dirEmpty[dir] // End if we reach a directory which is non-empty if found && !empty { break } dirEmpty[dir] = false } } } return nil }) if err != nil { return errors.Wrap(err, "failed to rmdirs") } // Now delete the empty directories, starting from the longest path var toDelete []string for dir, empty := range dirEmpty { if empty { toDelete = append(toDelete, dir) } } sort.Strings(toDelete) for i := len(toDelete) - 1; i >= 0; i-- { dir := toDelete[i] err := TryRmdir(f, dir) if err != nil { fs.CountError(err) fs.Errorf(dir, "Failed to rmdir: %v", err) return err } } return nil } // NeedTransfer checks to see if src needs to be copied to dst using // the current config. // // Returns a flag which indicates whether the file needs to be // transferred or not. func NeedTransfer(dst, src fs.Object) bool { if dst == nil { fs.Debugf(src, "Couldn't find file - need to transfer") return true } // If we should ignore existing files, don't transfer if fs.Config.IgnoreExisting { fs.Debugf(src, "Destination exists, skipping") return false } // If we should upload unconditionally if fs.Config.IgnoreTimes { fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use") return true } // If UpdateOlder is in effect, skip if dst is newer than src if fs.Config.UpdateOlder { srcModTime := src.ModTime() dstModTime := dst.ModTime() dt := dstModTime.Sub(srcModTime) // If we have a mutually agreed precision then use that modifyWindow := fs.Config.ModifyWindow if modifyWindow == fs.ModTimeNotSupported { // Otherwise use 1 second as a safe default as // the resolution of the time a file was // uploaded. 
modifyWindow = time.Second } switch { case dt >= modifyWindow: fs.Debugf(src, "Destination is newer than source, skipping") return false case dt <= -modifyWindow: fs.Debugf(src, "Destination is older than source, transferring") default: if src.Size() == dst.Size() { fs.Debugf(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow) return false } fs.Debugf(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow) } } else { // Check to see if changed or not if Equal(src, dst) { fs.Debugf(src, "Unchanged skipping") return false } } return true } // moveOrCopyFile moves or copies a single file possibly to a new name func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) { dstFilePath := path.Join(fdst.Root(), dstFileName) srcFilePath := path.Join(fsrc.Root(), srcFileName) if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath { fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName) return nil } // Choose operations Op := Move if cp { Op = Copy } // Find src object srcObj, err := fsrc.NewObject(srcFileName) if err != nil { return err } // Find dst object if it exists dstObj, err := fdst.NewObject(dstFileName) if err == fs.ErrorObjectNotFound { dstObj = nil } else if err != nil { return err } if NeedTransfer(dstObj, srcObj) { accounting.Stats.Transferring(srcFileName) _, err = Op(fdst, dstObj, dstFileName, srcObj) accounting.Stats.DoneTransferring(srcFileName, err == nil) } else { accounting.Stats.Checking(srcFileName) if !cp { err = DeleteFile(srcObj) } defer accounting.Stats.DoneChecking(srcFileName) } return err } // MoveFile moves a single file possibly to a new name func MoveFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, false) } // CopyFile copies a single file possibly to a new name func CopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, true) } // ListFormat defines files information print format type ListFormat struct { separator string dirSlash bool output []func() string entry fs.DirEntry hash bool } // SetSeparator changes separator in struct func (l *ListFormat) SetSeparator(separator string) { l.separator = separator } // SetDirSlash defines if slash should be printed func (l *ListFormat) SetDirSlash(dirSlash bool) { l.dirSlash = dirSlash } // SetOutput sets functions used to create files information func (l *ListFormat) SetOutput(output []func() string) { l.output = output } // AddModTime adds file's Mod Time to output func (l *ListFormat) AddModTime() { l.AppendOutput(func() string { return l.entry.ModTime().Format("2006-01-02 15:04:05") }) } // AddSize adds file's size to output func (l *ListFormat) AddSize() { l.AppendOutput(func() string { return strconv.FormatInt(l.entry.Size(), 10) }) } // AddPath adds path to file to output func (l *ListFormat) AddPath() { l.AppendOutput(func() string { _, isDir := l.entry.(fs.Directory) if isDir && l.dirSlash { return l.entry.Remote() + "/" } return l.entry.Remote() }) } // AddHash adds the hash of the type given to the output func (l *ListFormat) AddHash(ht hash.Type) { l.AppendOutput(func() string { o, ok := l.entry.(fs.Object) if !ok { return "" } return hashSum(ht, o) }) } // AppendOutput adds string generated by specific function to printed output func (l *ListFormat) 
AppendOutput(functionToAppend func() string) { if len(l.output) > 0 { l.output = append(l.output, func() string { return l.separator }) } l.output = append(l.output, functionToAppend) } // ListFormatted prints information about a specific file in a specific format func ListFormatted(entry *fs.DirEntry, list *ListFormat) string { list.entry = *entry var out string for _, fun := range list.output { out += fun() } return out }
1
6962
This `fileName` should probably be `remote` and be relative to the Fs root as per normal rclone usage.
rclone-rclone
go
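A sketch of the rename the review comment above is suggesting, applied to the PublicLink helper from the patch earlier in this record. Only the parameter name changes, from fileName to remote, to match rclone's convention that object paths are relative to the root of the Fs; the body is taken directly from the patch:

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
// "remote" is relative to the root of f, as per normal rclone usage.
func PublicLink(f fs.Fs, remote string) (string, error) {
    doPublicLink := f.Features().PublicLink
    if doPublicLink == nil {
        return "", errors.Errorf("%v doesn't support public links", f)
    }
    return doPublicLink(remote)
}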